/*
 * Core MTD registration and access routines.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

static struct backing_dev_info mtd_bdi = {
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

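/*
 * Device ->release() hook for the "mtd%d" class device.  All it needs to do
 * here is tear down the companion read-only "mtd%dro" node that
 * add_mtd_device() created at MTD_DEVT(index) + 1.
 */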
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
	mtd_bitflip_threshold_show,
	mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
	mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

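/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival.  Returns
 *	zero on success or 1 on failure.
 */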
int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	mtd->backing_dev_info = &mtd_bdi;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);

	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}

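/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success, -ENODEV if the device was not registered,
 *	or -EBUSY if it is still in use.
 */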
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

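/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > zero
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partition parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partition registration.  If
 * partitions were found, they are registered; otherwise the whole MTD
 * device is registered.  If the device provides a ->_reboot() hook, a
 * reboot notifier is registered as well.  Returns zero in case of success
 * and a negative error code in case of failure.
 */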
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		if (err == 1)
			err = -ENODEV;
	}

	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);

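/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the
 *	master and any partitions if registered.
 */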
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

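/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices.  Causes the 'add' callback to be
 *	immediately invoked for each MTD device currently present in the
 *	system.
 */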
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

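/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices.  Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */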
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

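/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any.  Given an address and num == -1, search the device table
 *	for a device with that address and return it if it's still present.
 *	Given both, return the num'th device only if its address matches.
 *	Returns an ERR_PTR() on failure.
 */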
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

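/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns an MTD device description structure in case of
 *	success and an error code in case of failure.
 */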
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

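/*
 * Erase is an asynchronous operation.  Device drivers are expected to call
 * instr->callback() whenever the operation completes, even if it completes
 * with a failure; a zero-length erase is completed (and the callback run)
 * here without calling into the driver at all.
 */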
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);

/*
 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

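	/*
	 * A positive return value from ->_read() is the maximum number of
	 * bitflips that were corrected during the read.  Translate that into
	 * -EUCLEAN once it reaches the device's bitflip_threshold, so callers
	 * know the data was recovered but the area is degrading.
	 */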
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * mtd_panic_write() is meant for "flight recorder" style use: it is only
 * called when the kernel is about to panic and the write must succeed
 * without sleeping.  Drivers that cannot guarantee this simply do not
 * provide ->_panic_write, and -EOPNOTSUPP is returned.
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;

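	/*
	 * As in mtd_read(), a positive return value from ->_read_oob() is the
	 * maximum number of corrected bitflips; it is converted to -EUCLEAN
	 * once it reaches the bitflip threshold.
	 */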
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

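	/*
	 * ->_write_user_prot_reg() reported success but wrote nothing:
	 * treat that as "no space left in the OTP area".
	 */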
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);

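/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */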
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

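/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */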
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

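/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size.  When allocating more than one page it asks the
 * allocator not to retry, swap or write back, so a failed large request
 * does not hurt system performance.  The returned size is kept aligned to
 * the MTD device's minimum I/O unit (mtd->writesize).
 */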
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

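	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */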
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);

#ifdef CONFIG_PROC_FS

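/* Support for /proc/mtd */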
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open = mtd_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

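/*
 * Set up the single backing_dev_info instance that add_mtd_device() attaches
 * to every registered MTD device.
 */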
static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi, "mtd");
	if (ret)
		goto err_bdi;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");