1
2
3
4
5
6
7
8
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/seq_file.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
18#include <linux/err.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/of.h>
22#include <linux/proc_fs.h>
23#include <linux/idr.h>
24#include <linux/backing-dev.h>
25#include <linux/gfp.h>
26#include <linux/slab.h>
27#include <linux/reboot.h>
28#include <linux/leds.h>
29#include <linux/debugfs.h>
30#include <linux/nvmem-provider.h>
31
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/partitions.h>
34
35#include "mtdcore.h"
36
37struct backing_dev_info *mtd_bdi;
38
39#ifdef CONFIG_PM_SLEEP
40
/* Class-level PM hook: suspend the MTD device bound to @dev, if any. */
static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (!mtd)
		return 0;

	return mtd_suspend(mtd);
}
47
/* Class-level PM hook: resume the MTD device bound to @dev, if any. */
static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (!mtd)
		return 0;

	mtd_resume(mtd);
	return 0;
}
56
57static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59#else
60#define MTD_CLS_PM_OPS NULL
61#endif
62
/* The "mtd" device class; provides suspend/resume hooks when PM is enabled. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};
68
69static DEFINE_IDR(mtd_idr);
70
71
72
73DEFINE_MUTEX(mtd_table_mutex);
74EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
/*
 * Return the registered MTD device with the lowest index >= i, or NULL.
 * Backs the mtd_for_each_device() iterator; caller holds mtd_table_mutex.
 */
struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);
81
82static LIST_HEAD(mtd_notifiers);
83
84
85#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87
88
89
/* Device release callback: runs once the last reference to the dev is gone. */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* Remove the companion "mtd%dro" device created by add_mtd_device() */
	device_destroy(&mtd_class, index + 1);
}
98
99#define MTD_DEVICE_ATTR_RO(name) \
100static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101
102#define MTD_DEVICE_ATTR_RW(name) \
103static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104
105static ssize_t mtd_type_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct mtd_info *mtd = dev_get_drvdata(dev);
109 char *type;
110
111 switch (mtd->type) {
112 case MTD_ABSENT:
113 type = "absent";
114 break;
115 case MTD_RAM:
116 type = "ram";
117 break;
118 case MTD_ROM:
119 type = "rom";
120 break;
121 case MTD_NORFLASH:
122 type = "nor";
123 break;
124 case MTD_NANDFLASH:
125 type = "nand";
126 break;
127 case MTD_DATAFLASH:
128 type = "dataflash";
129 break;
130 case MTD_UBIVOLUME:
131 type = "ubi";
132 break;
133 case MTD_MLCNANDFLASH:
134 type = "mlc-nand";
135 break;
136 default:
137 type = "unknown";
138 }
139
140 return sysfs_emit(buf, "%s\n", type);
141}
142MTD_DEVICE_ATTR_RO(type);
143
144static ssize_t mtd_flags_show(struct device *dev,
145 struct device_attribute *attr, char *buf)
146{
147 struct mtd_info *mtd = dev_get_drvdata(dev);
148
149 return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150}
151MTD_DEVICE_ATTR_RO(flags);
152
153static ssize_t mtd_size_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct mtd_info *mtd = dev_get_drvdata(dev);
157
158 return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159}
160MTD_DEVICE_ATTR_RO(size);
161
162static ssize_t mtd_erasesize_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164{
165 struct mtd_info *mtd = dev_get_drvdata(dev);
166
167 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168}
169MTD_DEVICE_ATTR_RO(erasesize);
170
171static ssize_t mtd_writesize_show(struct device *dev,
172 struct device_attribute *attr, char *buf)
173{
174 struct mtd_info *mtd = dev_get_drvdata(dev);
175
176 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177}
178MTD_DEVICE_ATTR_RO(writesize);
179
180static ssize_t mtd_subpagesize_show(struct device *dev,
181 struct device_attribute *attr, char *buf)
182{
183 struct mtd_info *mtd = dev_get_drvdata(dev);
184 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185
186 return sysfs_emit(buf, "%u\n", subpagesize);
187}
188MTD_DEVICE_ATTR_RO(subpagesize);
189
190static ssize_t mtd_oobsize_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct mtd_info *mtd = dev_get_drvdata(dev);
194
195 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196}
197MTD_DEVICE_ATTR_RO(oobsize);
198
199static ssize_t mtd_oobavail_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
201{
202 struct mtd_info *mtd = dev_get_drvdata(dev);
203
204 return sysfs_emit(buf, "%u\n", mtd->oobavail);
205}
206MTD_DEVICE_ATTR_RO(oobavail);
207
208static ssize_t mtd_numeraseregions_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
210{
211 struct mtd_info *mtd = dev_get_drvdata(dev);
212
213 return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214}
215MTD_DEVICE_ATTR_RO(numeraseregions);
216
217static ssize_t mtd_name_show(struct device *dev,
218 struct device_attribute *attr, char *buf)
219{
220 struct mtd_info *mtd = dev_get_drvdata(dev);
221
222 return sysfs_emit(buf, "%s\n", mtd->name);
223}
224MTD_DEVICE_ATTR_RO(name);
225
226static ssize_t mtd_ecc_strength_show(struct device *dev,
227 struct device_attribute *attr, char *buf)
228{
229 struct mtd_info *mtd = dev_get_drvdata(dev);
230
231 return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232}
233MTD_DEVICE_ATTR_RO(ecc_strength);
234
235static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238{
239 struct mtd_info *mtd = dev_get_drvdata(dev);
240
241 return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242}
243
244static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 struct device_attribute *attr,
246 const char *buf, size_t count)
247{
248 struct mtd_info *mtd = dev_get_drvdata(dev);
249 unsigned int bitflip_threshold;
250 int retval;
251
252 retval = kstrtouint(buf, 0, &bitflip_threshold);
253 if (retval)
254 return retval;
255
256 mtd->bitflip_threshold = bitflip_threshold;
257 return count;
258}
259MTD_DEVICE_ATTR_RW(bitflip_threshold);
260
261static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
263{
264 struct mtd_info *mtd = dev_get_drvdata(dev);
265
266 return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267
268}
269MTD_DEVICE_ATTR_RO(ecc_step_size);
270
271static ssize_t mtd_corrected_bits_show(struct device *dev,
272 struct device_attribute *attr, char *buf)
273{
274 struct mtd_info *mtd = dev_get_drvdata(dev);
275 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276
277 return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278}
279MTD_DEVICE_ATTR_RO(corrected_bits);
280
281static ssize_t mtd_ecc_failures_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct mtd_info *mtd = dev_get_drvdata(dev);
285 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286
287 return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288}
289MTD_DEVICE_ATTR_RO(ecc_failures);
290
291static ssize_t mtd_bad_blocks_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296
297 return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298}
299MTD_DEVICE_ATTR_RO(bad_blocks);
300
301static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
303{
304 struct mtd_info *mtd = dev_get_drvdata(dev);
305 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306
307 return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308}
309MTD_DEVICE_ATTR_RO(bbt_blocks);
310
/* Attributes exposed under /sys/class/mtd/mtdX/ for every MTD device. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);
332
/* Shared device_type for MTD devices; mtd_release runs on final put. */
static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};
338
339static int mtd_partid_debug_show(struct seq_file *s, void *p)
340{
341 struct mtd_info *mtd = s->private;
342
343 seq_printf(s, "%s\n", mtd->dbg.partid);
344
345 return 0;
346}
347
348DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
349
350static int mtd_partname_debug_show(struct seq_file *s, void *p)
351{
352 struct mtd_info *mtd = s->private;
353
354 seq_printf(s, "%s\n", mtd->dbg.partname);
355
356 return 0;
357}
358
359DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
360
361static struct dentry *dfs_dir_mtd;
362
/*
 * Create the per-device debugfs directory and, when the master device
 * provides them, "partid"/"partname" entries. No-op if the top-level
 * mtd debugfs dir could not be created.
 */
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (master->dbg.partid)
		debugfs_create_file("partid", 0400, root, master,
				    &mtd_partid_debug_fops);

	if (master->dbg.partname)
		debugfs_create_file("partname", 0400, root, master,
				    &mtd_partname_debug_fops);
}
383
384#ifndef CONFIG_MMU
385unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
386{
387 switch (mtd->type) {
388 case MTD_RAM:
389 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
390 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
391 case MTD_ROM:
392 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
393 NOMMU_MAP_READ;
394 default:
395 return NOMMU_MAP_COPY;
396 }
397}
398EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
399#endif
400
/* Reboot notifier: give the driver a chance to quiesce via ->_reboot(). */
static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	/* Only registered when mtd->_reboot is set, so no NULL check here */
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit passed in argument.
 * If the device does not provide a pairing scheme, the default is to assume
 * all pages are paired with themselves (group 0, pair == wunit).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
482 const struct mtd_pairing_info *info)
483{
484 struct mtd_info *master = mtd_get_master(mtd);
485 int ngroups = mtd_pairing_groups(master);
486 int npairs = mtd_wunit_per_eb(master) / ngroups;
487
488 if (!info || info->pair < 0 || info->pair >= npairs ||
489 info->group < 0 || info->group >= ngroups)
490 return -EINVAL;
491
492 if (master->pairing && master->pairing->get_wunit)
493 return mtd->pairing->get_wunit(master, info);
494
495 return info->pair;
496}
497EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
498
499
500
501
502
503
504
505
506
507
508
509int mtd_pairing_groups(struct mtd_info *mtd)
510{
511 struct mtd_info *master = mtd_get_master(mtd);
512
513 if (!master->pairing || !master->pairing->ngroups)
514 return 1;
515
516 return master->pairing->ngroups;
517}
518EXPORT_SYMBOL_GPL(mtd_pairing_groups);
519
520static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
521 void *val, size_t bytes)
522{
523 struct mtd_info *mtd = priv;
524 size_t retlen;
525 int err;
526
527 err = mtd_read(mtd, offset, bytes, &retlen, val);
528 if (err && err != -EUCLEAN)
529 return err;
530
531 return retlen == bytes ? 0 : -EIO;
532}
533
/*
 * Register the whole MTD device as a read-only NVMEM provider so cells
 * defined on top of it can be consumed by other drivers. Returns 0 on
 * success; -EOPNOTSUPP from nvmem_register (NVMEM disabled) is ignored.
 */
static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	/* Only attach the OF node when it follows the nvmem-cells binding */
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}
565
566
567
568
569
570
571
572
573
574
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */
int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	/* Erase support is mandatory unless the device declares MTD_NO_ERASE */
	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION is only valid on partitions of an MLC
	 * NAND whose master provides a pairing scheme and does not use
	 * vector writes.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		/* Shrink erase block and device size to the SLC view */
		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	/* Cache shift/mask when sizes are powers of two (fast-path math) */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures: device registration proceeds */
		error = 0;
	}

	/*
	 * Caller should have set dev.parent to match the physical device,
	 * if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	/* Companion read-only device node; destroyed again in mtd_release() */
	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	/*
	 * We _know_ we aren't being removed, because our caller is still
	 * holding us here. So none of this try_ nonsense, and no bitching
	 * about it either. :)
	 */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}
709
710
711
712
713
714
715
716
717
718
719
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or 1 on failure, which currently will happen
 * if the requested device does not appear to be present in the list.
 */
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
		the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		/* Still in use: leave it registered and report busy */
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		/* Balance the of_node_get() done in add_mtd_device() */
		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
761
762
763
764
765
/*
 * Set a few defaults (owner and name) inherited from the parent device,
 * and initialize the partition list and master locks.
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}
781
/*
 * Total size in bytes of the user (is_user) or factory OTP area, summed
 * from the otp_info records returned by the driver. Returns the size,
 * 0 when the driver reports no OTP data (-ENODATA), or a negative errno.
 */
static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	/* One page is assumed large enough to hold all otp_info records */
	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}
813
/*
 * Register an NVMEM provider of NVMEM_TYPE_OTP for one OTP area
 * ("user-otp" or "factory-otp"). Returns the nvmem device, NULL when the
 * NVMEM subsystem is disabled (-EOPNOTSUPP), or an ERR_PTR on failure.
 */
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT child node describing this OTP area, if any */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	/* just reuse the compatible string as the device name */
	config.name = compatible;
	config.id = NVMEM_DEVID_NONE;
	config.owner = THIS_MODULE;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}
848
849static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
850 void *val, size_t bytes)
851{
852 struct mtd_info *mtd = priv;
853 size_t retlen;
854 int ret;
855
856 ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
857 if (ret)
858 return ret;
859
860 return retlen == bytes ? 0 : -EIO;
861}
862
863static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
864 void *val, size_t bytes)
865{
866 struct mtd_info *mtd = priv;
867 size_t retlen;
868 int ret;
869
870 ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
871 if (ret)
872 return ret;
873
874 return retlen == bytes ? 0 : -EIO;
875}
876
/*
 * Register NVMEM providers for the user and factory OTP areas, when the
 * driver implements the corresponding prot_info/read callbacks and the
 * area is non-empty. On failure the already-registered user OTP provider
 * is torn down again.
 */
static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				return PTR_ERR(nvmem);
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	/* Undo the user OTP registration done above, if any */
	if (mtd->otp_user_nvmem)
		nvmem_unregister(mtd->otp_user_nvmem);
	return err;
}
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
/**
 * mtd_device_parse_register - parse partitions and register an MTD device
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition description if parsing fails
 * @nr_parts: the number of partitions in @parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * Registers the master device (when CONFIG_MTD_PARTITIONED_MASTER is set),
 * then the parsed or fallback partitions, or the bare device if neither
 * yields partitions. Also hooks up the reboot notifier and OTP NVMEM
 * providers. Returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than
	 * once. This is unsupported and will be dropped, but in the meantime,
	 * only register the notifier once.
	 *
	 * We also need to handle the MTD_PARTITIONED_MASTER case: the master
	 * is registered first, then partitions are parsed, and this function
	 * may be re-entered for the same device.
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	ret = mtd_otp_nvmem_add(mtd);

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
1010
1011
1012
1013
1014
1015
1016
/**
 * mtd_device_unregister - unregister an existing MTD device.
 * @master: the MTD device to unregister. This will unregister both the
 *          master and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	if (master->otp_user_nvmem)
		nvmem_unregister(master->otp_user_nvmem);

	if (master->otp_factory_nvmem)
		nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	/* The master itself is only registered with PARTITIONED_MASTER */
	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
1040
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callbacks function to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
1136
1137
/*
 * Take a reference on @mtd: pin the owning module, run the driver's
 * ->_get_device() hook if present, then bump the master's usecount and
 * the usecount of every partition on the path up to the master.
 * Caller holds mtd_table_mutex. Returns 0 or a negative error code.
 */
int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	/* Also pin every intermediate partition up to (not including) master */
	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
1165
1166
1167
1168
1169
1170
1171
1172
1173
/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1203
/* Drop a reference taken with get_mtd_device()/get_mtd_device_nm(). */
void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);
1212
/*
 * Drop the references taken in __get_mtd_device(): decrement usecounts
 * along the partition chain and on the master, run ->_put_device(), and
 * release the owning module. Caller holds mtd_table_mutex.
 */
void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
1231
1232
1233
1234
1235
1236
/*
 * Erase is an synchronous operation. Device drivers are epected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	/* Translate the SLC-emulated address range to the master's view */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	/* Map a failure address back into this partition's address space */
	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
1284
1285
1286
1287
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
1308
1309
1310int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1311{
1312 struct mtd_info *master = mtd_get_master(mtd);
1313
1314 if (!master->_unpoint)
1315 return -EOPNOTSUPP;
1316 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1317 return -EINVAL;
1318 if (!len)
1319 return 0;
1320 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1321}
1322EXPORT_SYMBOL_GPL(mtd_unpoint);
1323
1324
1325
1326
1327
1328
/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		/* negative errno is returned encoded in the unsigned long */
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1346
/*
 * Propagate the delta of the master's ECC stats (since @old_stats was
 * captured) to every partition between @mtd and the master.
 */
static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}
1365
1366int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1367 u_char *buf)
1368{
1369 struct mtd_oob_ops ops = {
1370 .len = len,
1371 .datbuf = buf,
1372 };
1373 int ret;
1374
1375 ret = mtd_read_oob(mtd, from, &ops);
1376 *retlen = ops.retlen;
1377
1378 return ret;
1379}
1380EXPORT_SYMBOL_GPL(mtd_read);
1381
1382int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1383 const u_char *buf)
1384{
1385 struct mtd_oob_ops ops = {
1386 .len = len,
1387 .datbuf = (u8 *)buf,
1388 };
1389 int ret;
1390
1391 ret = mtd_write_oob(mtd, to, &ops);
1392 *retlen = ops.retlen;
1393
1394 return ret;
1395}
1396EXPORT_SYMBOL_GPL(mtd_write);
1397
1398
1399
1400
1401
1402
1403
1404
/*
 * In blackbox flight-recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when its
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	/* Record that a panic write happened; device state is now suspect */
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
1426
/*
 * Validate and normalize an OOB operation request: clamp lengths whose
 * buffers are missing, and reject offsets/lengths beyond the data or
 * available OOB area. Returns 0 if @ops is acceptable, -EINVAL otherwise.
 */
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 *  this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		/* OOB bytes available from @offs to the end of the device */
		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}
1459
1460static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1461 struct mtd_oob_ops *ops)
1462{
1463 struct mtd_info *master = mtd_get_master(mtd);
1464 int ret;
1465
1466 from = mtd_get_master_ofs(mtd, from);
1467 if (master->_read_oob)
1468 ret = master->_read_oob(master, from, ops);
1469 else
1470 ret = master->_read(master, from, ops->len, &ops->retlen,
1471 ops->datbuf);
1472
1473 return ret;
1474}
1475
1476static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1477 struct mtd_oob_ops *ops)
1478{
1479 struct mtd_info *master = mtd_get_master(mtd);
1480 int ret;
1481
1482 to = mtd_get_master_ofs(mtd, to);
1483 if (master->_write_oob)
1484 ret = master->_write_oob(master, to, ops);
1485 else
1486 ret = master->_write(master, to, ops->len, &ops->retlen,
1487 ops->datbuf);
1488
1489 return ret;
1490}
1491
/*
 * Perform an OOB read or write on a partition running in
 * MTD_SLC_ON_MLC_EMULATION mode. The request is split into page-sized
 * chunks; each chunk's logical pair index is translated through the
 * master's pairing scheme to the physical write unit before delegating
 * to mtd_read_oob_std()/mtd_write_oob_std().
 *
 * Returns the maximum bitflip count seen across all chunks (reads), 0
 * on a clean write, or a negative error code.
 */
static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;	/* per-chunk view of the request */
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	/* Split @start into erase-block base, pair index and in-page offset. */
	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		/* Ran off the erase block: restart pairing in the next one. */
		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		/* Clamp the chunk to what remains of the current page. */
		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;	/* ooboffs only applies to the first chunk */
		pageofs = 0;		/* later chunks start at page offset 0 */
		info.pair++;
	}

	return max_bitflips;
}
1554
/*
 * Read data and/or out-of-band bytes as described by @ops.
 *
 * Returns 0 on success, -EUCLEAN when the maximum bitflip count reached
 * mtd->bitflip_threshold, or a negative error code on failure.
 */
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	/* Snapshot ECC stats so the delta can be credited to this mtd. */
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * A non-negative return is the maximum number of bitflips seen in
	 * any one region; mirror mtd_read() semantics: report -EUCLEAN once
	 * that count reaches the configured threshold. Devices without ECC
	 * (ecc_strength == 0) never report -EUCLEAN.
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
1593
1594int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1595 struct mtd_oob_ops *ops)
1596{
1597 struct mtd_info *master = mtd_get_master(mtd);
1598 int ret;
1599
1600 ops->retlen = ops->oobretlen = 0;
1601
1602 if (!(mtd->flags & MTD_WRITEABLE))
1603 return -EROFS;
1604
1605 ret = mtd_check_oob_ops(mtd, to, ops);
1606 if (ret)
1607 return ret;
1608
1609 ledtrig_mtd_activity();
1610
1611
1612 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1613 return -EOPNOTSUPP;
1614
1615 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1616 return mtd_io_emulated_slc(mtd, to, false, ops);
1617
1618 return mtd_write_oob_std(mtd, to, ops);
1619}
1620EXPORT_SYMBOL_GPL(mtd_write_oob);
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1639 struct mtd_oob_region *oobecc)
1640{
1641 struct mtd_info *master = mtd_get_master(mtd);
1642
1643 memset(oobecc, 0, sizeof(*oobecc));
1644
1645 if (!master || section < 0)
1646 return -EINVAL;
1647
1648 if (!master->ooblayout || !master->ooblayout->ecc)
1649 return -ENOTSUPP;
1650
1651 return master->ooblayout->ecc(master, section, oobecc);
1652}
1653EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1673 struct mtd_oob_region *oobfree)
1674{
1675 struct mtd_info *master = mtd_get_master(mtd);
1676
1677 memset(oobfree, 0, sizeof(*oobfree));
1678
1679 if (!master || section < 0)
1680 return -EINVAL;
1681
1682 if (!master->ooblayout || !master->ooblayout->free)
1683 return -ENOTSUPP;
1684
1685 return master->ooblayout->free(master, section, oobfree);
1686}
1687EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
/*
 * Translate a byte position inside the concatenated ECC (or free) OOB
 * space into the (section, region) pair that contains it. @iter is one
 * of mtd_ooblayout_ecc()/mtd_ooblayout_free() and is walked section by
 * section until the section holding @byte is reached.
 *
 * On success, *@sectionp holds the section index and @oobregion is
 * adjusted to start exactly at @byte. Returns 0 on success, otherwise
 * the error propagated from @iter (e.g. when @byte is past the layout).
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				int *sectionp, struct mtd_oob_region *oobregion,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		/* Does this section cover @byte? */
		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of @byte:
	 * skip the part of this section that precedes @byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() with mtd_ooblayout_ecc() as the
 * iterator: the returned region starts exactly at @eccbyte.
 *
 * Returns zero on success, an error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1776 const u8 *oobbuf, int start, int nbytes,
1777 int (*iter)(struct mtd_info *,
1778 int section,
1779 struct mtd_oob_region *oobregion))
1780{
1781 struct mtd_oob_region oobregion;
1782 int section, ret;
1783
1784 ret = mtd_ooblayout_find_region(mtd, start, §ion,
1785 &oobregion, iter);
1786
1787 while (!ret) {
1788 int cnt;
1789
1790 cnt = min_t(int, nbytes, oobregion.length);
1791 memcpy(buf, oobbuf + oobregion.offset, cnt);
1792 buf += cnt;
1793 nbytes -= cnt;
1794
1795 if (!nbytes)
1796 break;
1797
1798 ret = iter(mtd, ++section, &oobregion);
1799 }
1800
1801 return ret;
1802}
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1819 u8 *oobbuf, int start, int nbytes,
1820 int (*iter)(struct mtd_info *,
1821 int section,
1822 struct mtd_oob_region *oobregion))
1823{
1824 struct mtd_oob_region oobregion;
1825 int section, ret;
1826
1827 ret = mtd_ooblayout_find_region(mtd, start, §ion,
1828 &oobregion, iter);
1829
1830 while (!ret) {
1831 int cnt;
1832
1833 cnt = min_t(int, nbytes, oobregion.length);
1834 memcpy(oobbuf + oobregion.offset, buf, cnt);
1835 buf += cnt;
1836 nbytes -= cnt;
1837
1838 if (!nbytes)
1839 break;
1840
1841 ret = iter(mtd, ++section, &oobregion);
1842 }
1843
1844 return ret;
1845}
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1857 int (*iter)(struct mtd_info *,
1858 int section,
1859 struct mtd_oob_region *oobregion))
1860{
1861 struct mtd_oob_region oobregion;
1862 int section = 0, ret, nbytes = 0;
1863
1864 while (1) {
1865 ret = iter(mtd, section++, &oobregion);
1866 if (ret) {
1867 if (ret == -ERANGE)
1868 ret = nbytes;
1869 break;
1870 }
1871
1872 nbytes += oobregion.length;
1873 }
1874
1875 return ret;
1876}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, an error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, an error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on the free OOB
 * (data) bytes.
 *
 * Returns zero on success, an error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on the free OOB
 * (data) bytes.
 *
 * Returns zero on success, an error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1957
1958
1959
1960
1961
1962
1963
1964
1965
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes, or a negative error code on failure.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1971
1972
1973
1974
1975
1976
1977
1978
1979
/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes, or a negative error code on failure.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1985
1986
1987
1988
1989
1990
1991int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1992 struct otp_info *buf)
1993{
1994 struct mtd_info *master = mtd_get_master(mtd);
1995
1996 if (!master->_get_fact_prot_info)
1997 return -EOPNOTSUPP;
1998 if (!len)
1999 return 0;
2000 return master->_get_fact_prot_info(master, len, retlen, buf);
2001}
2002EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2003
2004int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2005 size_t *retlen, u_char *buf)
2006{
2007 struct mtd_info *master = mtd_get_master(mtd);
2008
2009 *retlen = 0;
2010 if (!master->_read_fact_prot_reg)
2011 return -EOPNOTSUPP;
2012 if (!len)
2013 return 0;
2014 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2015}
2016EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2017
2018int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2019 struct otp_info *buf)
2020{
2021 struct mtd_info *master = mtd_get_master(mtd);
2022
2023 if (!master->_get_user_prot_info)
2024 return -EOPNOTSUPP;
2025 if (!len)
2026 return 0;
2027 return master->_get_user_prot_info(master, len, retlen, buf);
2028}
2029EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2030
2031int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2032 size_t *retlen, u_char *buf)
2033{
2034 struct mtd_info *master = mtd_get_master(mtd);
2035
2036 *retlen = 0;
2037 if (!master->_read_user_prot_reg)
2038 return -EOPNOTSUPP;
2039 if (!len)
2040 return 0;
2041 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2042}
2043EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2044
2045int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2046 size_t *retlen, const u_char *buf)
2047{
2048 struct mtd_info *master = mtd_get_master(mtd);
2049 int ret;
2050
2051 *retlen = 0;
2052 if (!master->_write_user_prot_reg)
2053 return -EOPNOTSUPP;
2054 if (!len)
2055 return 0;
2056 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2057 if (ret)
2058 return ret;
2059
2060
2061
2062
2063
2064 return (*retlen) ? 0 : -ENOSPC;
2065}
2066EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2067
2068int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2069{
2070 struct mtd_info *master = mtd_get_master(mtd);
2071
2072 if (!master->_lock_user_prot_reg)
2073 return -EOPNOTSUPP;
2074 if (!len)
2075 return 0;
2076 return master->_lock_user_prot_reg(master, from, len);
2077}
2078EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2079
2080int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2081{
2082 struct mtd_info *master = mtd_get_master(mtd);
2083
2084 if (!master->_erase_user_prot_reg)
2085 return -EOPNOTSUPP;
2086 if (!len)
2087 return 0;
2088 return master->_erase_user_prot_reg(master, from, len);
2089}
2090EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2091
2092
2093int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2094{
2095 struct mtd_info *master = mtd_get_master(mtd);
2096
2097 if (!master->_lock)
2098 return -EOPNOTSUPP;
2099 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2100 return -EINVAL;
2101 if (!len)
2102 return 0;
2103
2104 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2105 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2106 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2107 }
2108
2109 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2110}
2111EXPORT_SYMBOL_GPL(mtd_lock);
2112
2113int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2114{
2115 struct mtd_info *master = mtd_get_master(mtd);
2116
2117 if (!master->_unlock)
2118 return -EOPNOTSUPP;
2119 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2120 return -EINVAL;
2121 if (!len)
2122 return 0;
2123
2124 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2125 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2126 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2127 }
2128
2129 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2130}
2131EXPORT_SYMBOL_GPL(mtd_unlock);
2132
2133int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2134{
2135 struct mtd_info *master = mtd_get_master(mtd);
2136
2137 if (!master->_is_locked)
2138 return -EOPNOTSUPP;
2139 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2140 return -EINVAL;
2141 if (!len)
2142 return 0;
2143
2144 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2145 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2146 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2147 }
2148
2149 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2150}
2151EXPORT_SYMBOL_GPL(mtd_is_locked);
2152
2153int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2154{
2155 struct mtd_info *master = mtd_get_master(mtd);
2156
2157 if (ofs < 0 || ofs >= mtd->size)
2158 return -EINVAL;
2159 if (!master->_block_isreserved)
2160 return 0;
2161
2162 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2163 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2164
2165 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2166}
2167EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2168
2169int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2170{
2171 struct mtd_info *master = mtd_get_master(mtd);
2172
2173 if (ofs < 0 || ofs >= mtd->size)
2174 return -EINVAL;
2175 if (!master->_block_isbad)
2176 return 0;
2177
2178 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2179 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2180
2181 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2182}
2183EXPORT_SYMBOL_GPL(mtd_block_isbad);
2184
/*
 * Mark the erase block at @ofs bad and, on success, bump the badblocks
 * counter of this mtd and every parent in the partition chain.
 */
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	/* Align to the master's erase block when emulating SLC on MLC. */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	/*
	 * Propagate the new bad block into the ECC stats of every parent
	 * up the partition chain so each level's count stays accurate.
	 */
	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2225 unsigned long count, loff_t to, size_t *retlen)
2226{
2227 unsigned long i;
2228 size_t totlen = 0, thislen;
2229 int ret = 0;
2230
2231 for (i = 0; i < count; i++) {
2232 if (!vecs[i].iov_len)
2233 continue;
2234 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2235 vecs[i].iov_base);
2236 totlen += thislen;
2237 if (ret || thislen != vecs[i].iov_len)
2238 break;
2239 to += vecs[i].iov_len;
2240 }
2241 *retlen = totlen;
2242 return ret;
2243}
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2257 unsigned long count, loff_t to, size_t *retlen)
2258{
2259 struct mtd_info *master = mtd_get_master(mtd);
2260
2261 *retlen = 0;
2262 if (!(mtd->flags & MTD_WRITEABLE))
2263 return -EROFS;
2264
2265 if (!master->_writev)
2266 return default_mtd_writev(mtd, vecs, count, to, retlen);
2267
2268 return master->_writev(master, vecs, count,
2269 mtd_get_master_ofs(mtd, to), retlen);
2270}
2271EXPORT_SYMBOL_GPL(mtd_writev);
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * Tries to allocate *@size bytes (capped at KMALLOC_MAX_SIZE), halving
 * the request and re-aligning it to the device write size after each
 * failure, down to roughly one page / one write unit. Returns the buffer
 * (with *@size updated) or NULL if even the final attempt fails.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	/* Fail fast and silently while the larger sizes are being tried. */
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		/* Keep the request a multiple of the device write size. */
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * Last attempt at the smallest size: use plain GFP_KERNEL so the
	 * allocator may retry/reclaim, since there is no smaller fallback.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
2321
2322#ifdef CONFIG_PROC_FS
2323
2324
2325
2326
/*
 * /proc/mtd: one header line, then one line per registered device with
 * its index, size, erase size and name. The table mutex is held while
 * iterating so devices cannot appear/disappear mid-listing.
 */
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
2341#endif
2342
2343
2344
2345
2346static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2347{
2348 struct backing_dev_info *bdi;
2349 int ret;
2350
2351 bdi = bdi_alloc(NUMA_NO_NODE);
2352 if (!bdi)
2353 return ERR_PTR(-ENOMEM);
2354 bdi->ra_pages = 0;
2355 bdi->io_pages = 0;
2356
2357
2358
2359
2360
2361 ret = bdi_register(bdi, "%.28s-0", name);
2362 if (ret)
2363 bdi_put(bdi);
2364
2365 return ret ? ERR_PTR(ret) : bdi;
2366}
2367
2368static struct proc_dir_entry *proc_mtd;
2369
/*
 * Module init: register the mtd class, set up the backing_dev_info,
 * create the (optional) procfs entry, initialize the character-device
 * layer and the debugfs root. The error labels unwind in strict reverse
 * order of setup.
 */
static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	/* The procfs entry is best-effort; failure (NULL) is tolerated. */
	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}
2404
/*
 * Module exit: tear everything down in reverse order of init_mtd(),
 * then release the device-index IDR.
 */
static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}
2415
2416module_init(init_mtd);
2417module_exit(cleanup_mtd);
2418
2419MODULE_LICENSE("GPL");
2420MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2421MODULE_DESCRIPTION("Core MTD registration and access routines");
2422