/*
 * Core MTD registration and access routines.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"
/* Backing device capabilities for devices that cannot be mapped directly */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

/*
 * Backing device capabilities for read-only mappable devices: they may be
 * copied or mapped directly for reading and execution, but not for writing.
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/* Backing device capabilities for read/write mappable devices */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			   BDI_CAP_WRITE_MAP),
};
static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

/* Serializes additions, removals and lookups in the MTD device table (mtd_idr) */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* Remove the /dev/mtdXro companion device, if one was created */
	if (index)
		device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}
static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
		   NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};

static struct attribute_group mtd_group = {
	.attrs		= mtd_attrs,
};

static const struct attribute_group *mtd_groups[] = {
	&mtd_group,
	NULL,
};

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};
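/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to the new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently registered MTD user of its arrival. Returns
 *	zero on success or 1 on failure (for example if the IDR allocation
 *	or the device registration fails).
 */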
int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	/* Default the bitflip threshold to the ECC strength if the driver did not set one */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips power up locked; unlock them now so that writes can work */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

	/*
	 * Register the backing struct device. The caller is expected to have
	 * set dev.parent to the physical parent device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	if (MTD_DEVT(i))
		device_create(&mtd_class, mtd->dev.parent,
			      MTD_DEVT(i) + 1,
			      NULL, "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);

	/* Notify all registered MTD users of the new device */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	/*
	 * Our caller still holds a reference to this module, so it cannot be
	 * going away; take a plain reference on behalf of the new device.
	 */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}
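/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to the MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently registered MTD user of its departure.
 *	Returns zero on success, -EBUSY if the device is still in use, or
 *	-ENODEV if it was never registered.
 */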
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* Tell registered MTD users that the device is going away */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
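/**
 * mtd_device_parse_register - parse partitions and register an MTD device
 * @mtd: the MTD device to register
 * @types: list of partition parser names to try; passed through to
 *	parse_mtd_partitions()
 * @parser_data: parser-specific data, passed through to the parsers
 * @parts: fallback partition table to use when parsing finds nothing
 * @nr_parts: number of entries in @parts (may be zero)
 *
 * Try to parse a partition table for @mtd; if that yields nothing, fall back
 * to the @parts/@nr_parts table supplied by the caller. Any partitions found
 * are registered as MTD devices; when neither source yields partitions, the
 * whole device is registered instead. Returns zero on success or a negative
 * error code on failure.
 */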
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		if (err == 1)
			err = -ENODEV;
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
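/**
 * mtd_device_unregister - unregister an existing MTD device
 * @master: the MTD device to unregister
 *
 * Delete any partitions registered on top of @master, then unregister the
 * master device itself if it was registered. Returns zero on success or a
 * negative error code on failure.
 */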
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
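/**
 *	register_mtd_user - register a 'user' of MTD devices
 *	@new: pointer to the notifier info structure
 *
 *	Registers a pair of callbacks, via @new, to be called when MTD
 *	devices are added to or removed from the system. The 'add' callback
 *	is invoked immediately for every device that already exists.
 */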
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
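/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices
 *	@old: pointer to the notifier info structure
 *
 *	Removes a callback pair previously registered with
 *	register_mtd_user(). The 'remove' callback is invoked for each
 *	device currently present before the notifier is deleted.
 */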
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
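/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and a NULL address, return the num'th entry in the
 *	device table, if any. Given an address and num == -1, search the
 *	device table for a device with that address and return it if found.
 *	The use count of the returned device is incremented; callers must
 *	release it with put_mtd_device(). On failure an ERR_PTR() value is
 *	returned.
 */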
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);
		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
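/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	Return the MTD device with the given name and increment its use
 *	count, or an ERR_PTR() value if no such device exists.
 */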
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
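/*
 * Erase is handled as an asynchronous operation: drivers are expected to
 * complete the request via mtd_erase_callback(), which is also what this
 * wrapper does itself for zero-length requests.
 */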
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
/*
 * Map a region of the device directly into the kernel's address space, if
 * the driver supports it. 'phys' is optional and may be NULL when the caller
 * does not need the physical address.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* Undo a previous mtd_point() mapping */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device: returns the address to
 * which the given offset maps, or an error code.
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset > mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;

	*retlen = 0;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * A negative return from the driver's _read() is an error code. A
	 * non-negative return is interpreted as a corrected-bitflip count,
	 * which is reported as -EUCLEAN once it reaches the device's
	 * bitflip_threshold, so callers know the data is degrading.
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ECC */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
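/*
 * In blackbox flight-recorder like scenarios we want writes to succeed even
 * from interrupt context. panic_write() is only intended to be called once
 * it is known the kernel is about to panic, so the driver may break locks
 * and busy-wait (but not sleep) to make the write succeed.
 */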
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;

	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;

	/*
	 * As with mtd_read(), a negative return from the driver is an error
	 * code, while a non-negative return is a corrected-bitflip count that
	 * is converted to -EUCLEAN once it reaches the bitflip threshold.
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ECC */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

/*
 * Access methods for the factory and user protection register areas that
 * are present on some flash devices.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
			   size_t len)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, buf, len);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
			   size_t len)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, buf, len);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_isbad)
		return 0;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
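/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device
 *
 * Writes the vectors one after another with mtd_write(). Returns zero on
 * success or a negative error code on failure.
 */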
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}
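/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device
 *
 * Returns zero on success or a negative error code on failure. If the driver
 * does not supply a _writev method, the vectors are written one at a time
 * with mtd_write().
 */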
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
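/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: pointer to the ideal or maximum size of the allocation; on success
 *	it is updated to the size actually allocated
 *
 * Attempt to allocate a contiguous kernel buffer of up to *@size bytes,
 * halving the request (rounded to a multiple of mtd->writesize) until an
 * allocation succeeds or the request drops to one page / one writesize, at
 * which point a final plain kmalloc() is attempted. The larger attempts use
 * flags that avoid retrying, swapping and writeback so that the back-off
 * does not hurt overall system performance. Returns the buffer, or NULL on
 * failure.
 */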
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last-resort allocation allow kmalloc() to do whatever it
	 * needs (write-back, dropping caches, etc.) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}
static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
	if (ret)
		goto err_bdi1;

	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
	if (ret)
		goto err_bdi2;

	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
	if (ret)
		goto err_bdi3;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	/* Also undo the rw-mappable BDI set up above; the shared labels below do not cover it */
	bdi_destroy(&mtd_bdi_rw_mappable);
err_bdi3:
	bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
	bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");