/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Author: David Woodhouse <dwmw2@infradead.org> (see MODULE_AUTHOR below).
 * Licensed under the GPL (see MODULE_LICENSE below).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

/*
 * Backing device capabilities for non-mappable devices (such as NAND flash):
 * only private, copied mappings are permitted.
 */
static struct backing_dev_info mtd_bdi_unmappable = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

/*
 * Backing device capabilities for read-only mappable devices (such as ROM):
 * permits copied private mappings and direct read/exec shared mappings, but
 * no writable shared mappings.
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
        .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
                           BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * Backing device capabilities for writable mappable devices (such as RAM):
 * permits copied private mappings and direct read/write/exec shared
 * mappings.
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
        .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
                           BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
                           BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
        .name = "mtd",
        .owner = THIS_MODULE,
        .suspend = mtd_cls_suspend,
        .resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else. */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
        return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

/* Each MTD device owns a pair of character device minors: 2*index for
   /dev/mtdX and 2*index + 1 for the read-only /dev/mtdXro node. */
#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
#else
#define MTD_DEVT(index) 0
#endif

/*
 * Device release hook: remove the /dev/mtdXro node that was created in
 * add_mtd_device(), if there is one.
 */
static void mtd_release(struct device *dev)
{
        dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);

        /* remove /dev/mtdXro node if needed */
        if (index)
                device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        if (mtd && mtd->suspend)
                return mtd->suspend(mtd);
        else
                return 0;
}

static int mtd_cls_resume(struct device *dev)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        if (mtd && mtd->resume)
                mtd->resume(mtd);
        return 0;
}

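/*
 * Read-only sysfs attributes exposed for each registered MTD device under
 * /sys/class/mtd/mtdX/ (type, flags, size, erasesize, writesize,
 * subpagesize, oobsize, numeraseregions, name).
 */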
static ssize_t mtd_type_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);
        char *type;

        switch (mtd->type) {
        case MTD_ABSENT:
                type = "absent";
                break;
        case MTD_RAM:
                type = "ram";
                break;
        case MTD_ROM:
                type = "rom";
                break;
        case MTD_NORFLASH:
                type = "nor";
                break;
        case MTD_NANDFLASH:
                type = "nand";
                break;
        case MTD_DATAFLASH:
                type = "dataflash";
                break;
        case MTD_UBIVOLUME:
                type = "ubi";
                break;
        default:
                type = "unknown";
        }

        return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%llu\n",
                        (unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);
        unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

        return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
        NULL);

static ssize_t mtd_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_to_mtd(dev);

        return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
        &dev_attr_size.attr,
        &dev_attr_erasesize.attr,
        &dev_attr_writesize.attr,
        &dev_attr_subpagesize.attr,
        &dev_attr_oobsize.attr,
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
        NULL,
};

static struct attribute_group mtd_group = {
        .attrs          = mtd_attrs,
};

static const struct attribute_group *mtd_groups[] = {
        &mtd_group,
        NULL,
};

static struct device_type mtd_devtype = {
        .name           = "mtd",
        .groups         = mtd_groups,
        .release        = mtd_release,
};

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or 1 on failure, which currently will only happen
 * if there is insufficient memory or a sysfs error.
 */
int add_mtd_device(struct mtd_info *mtd)
{
        struct mtd_notifier *not;
        int i, error;

        if (!mtd->backing_dev_info) {
                switch (mtd->type) {
                case MTD_RAM:
                        mtd->backing_dev_info = &mtd_bdi_rw_mappable;
                        break;
                case MTD_ROM:
                        mtd->backing_dev_info = &mtd_bdi_ro_mappable;
                        break;
                default:
                        mtd->backing_dev_info = &mtd_bdi_unmappable;
                        break;
                }
        }

        BUG_ON(mtd->writesize == 0);
        mutex_lock(&mtd_table_mutex);

        do {
                if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
                        goto fail_locked;
                error = idr_get_new(&mtd_idr, mtd, &i);
        } while (error == -EAGAIN);

        if (error)
                goto fail_locked;

        mtd->index = i;
        mtd->usecount = 0;

        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
                mtd->erasesize_shift = 0;

        if (is_power_of_2(mtd->writesize))
                mtd->writesize_shift = ffs(mtd->writesize) - 1;
        else
                mtd->writesize_shift = 0;

        mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
        mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

        /* Some chips always power up locked. Unlock them now */
        if ((mtd->flags & MTD_WRITEABLE)
            && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
                if (mtd->unlock(mtd, 0, mtd->size))
                        printk(KERN_WARNING
                               "%s: unlock failed, writes may not work\n",
                               mtd->name);
        }

        /*
         * The caller should have set dev.parent to match the physical
         * device, if appropriate.
         */
        mtd->dev.type = &mtd_devtype;
        mtd->dev.class = &mtd_class;
        mtd->dev.devt = MTD_DEVT(i);
        dev_set_name(&mtd->dev, "mtd%d", i);
        dev_set_drvdata(&mtd->dev, mtd);
        if (device_register(&mtd->dev) != 0)
                goto fail_added;

        if (MTD_DEVT(i))
                device_create(&mtd_class, mtd->dev.parent,
                              MTD_DEVT(i) + 1,
                              NULL, "mtd%dro", i);

        DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);

        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->add(mtd);

        mutex_unlock(&mtd_table_mutex);

        /* Our caller is still holding a reference to us, so we cannot be
           removed here; taking a plain module reference is safe. */
        __module_get(THIS_MODULE);
        return 0;

fail_added:
        idr_remove(&mtd_idr, i);
fail_locked:
        mutex_unlock(&mtd_table_mutex);
        return 1;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success, -ENODEV if the device is not present, or
 * -EBUSY if it is still in use.
 */
int del_mtd_device(struct mtd_info *mtd)
{
        int ret;
        struct mtd_notifier *not;

        mutex_lock(&mtd_table_mutex);

        if (idr_find(&mtd_idr, mtd->index) != mtd) {
                ret = -ENODEV;
                goto out_error;
        }

        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->remove(mtd);

        if (mtd->usecount) {
                printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
                       mtd->index, mtd->name, mtd->usecount);
                ret = -EBUSY;
        } else {
                device_unregister(&mtd->dev);

                idr_remove(&mtd_idr, mtd->index);

                module_put(THIS_MODULE);
                ret = 0;
        }

out_error:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}

/**
 * mtd_device_register - register an MTD device.
 * @master: the MTD device to register
 * @parts: the partitions to register - only valid if nr_parts > 0
 * @nr_parts: the number of partitions in parts
 *
 * Register an MTD device with the system and optionally, also the MTD
 * partitions on the device. Returns zero on success or a negative error
 * code on failure.
 */
int mtd_device_register(struct mtd_info *master,
                        const struct mtd_partition *parts,
                        int nr_parts)
{
        return parts ? add_mtd_partitions(master, parts, nr_parts) :
                add_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_register);
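
/*
 * Usage sketch (illustrative only, not part of this file): a flash driver's
 * probe routine would typically fill in its mtd_info and then call
 * mtd_device_register(), e.g. with a hypothetical partition table:
 *
 *      static struct mtd_partition example_parts[] = {
 *              { .name = "boot",   .offset = 0,          .size = 256 * 1024 },
 *              { .name = "rootfs", .offset = 256 * 1024, .size = MTDPART_SIZ_FULL },
 *      };
 *
 *      err = mtd_device_register(mtd, example_parts, ARRAY_SIZE(example_parts));
 *
 * Passing parts == NULL registers the master device without partitions.
 */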

/**
 * mtd_device_unregister - unregister an existing MTD device.
 * @master: the MTD device to unregister.  This will unregister both the
 *          master and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
        int err;

        err = del_mtd_partitions(master);
        if (err)
                return err;

        if (!device_is_registered(&master->dev))
                return 0;

        return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be invoked
 * immediately for each MTD device already present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        list_add(&new->list, &mtd_notifiers);

        __module_get(THIS_MODULE);

        mtd_for_each_device(mtd)
                new->add(mtd);

        mutex_unlock(&mtd_table_mutex);
}

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be invoked immediately for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        module_put(THIS_MODULE);

        mtd_for_each_device(mtd)
                old->remove(mtd);

        list_del(&old->list);
        mutex_unlock(&mtd_table_mutex);
        return 0;
}
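
/*
 * Usage sketch (illustrative only, not part of this file): an MTD 'user'
 * such as a block translation layer hooks device arrival/departure with a
 * hypothetical notifier like:
 *
 *      static void example_add(struct mtd_info *mtd)    { ... }
 *      static void example_remove(struct mtd_info *mtd) { ... }
 *
 *      static struct mtd_notifier example_notifier = {
 *              .add    = example_add,
 *              .remove = example_remove,
 *      };
 *
 *      register_mtd_user(&example_notifier);
 *      ...
 *      unregister_mtd_user(&example_notifier);
 */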

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any.  Given an address and num == -1, search the device table
 * for a device with that address and return it if it's still present.
 * Given both, return the num'th device only if its address matches.
 * Returns an ERR_PTR() on failure.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
        struct mtd_info *ret = NULL, *other;
        int err = -ENODEV;

        mutex_lock(&mtd_table_mutex);

        if (num == -1) {
                mtd_for_each_device(other) {
                        if (other == mtd) {
                                ret = mtd;
                                break;
                        }
                }
        } else if (num >= 0) {
                ret = idr_find(&mtd_idr, num);
                if (mtd && mtd != ret)
                        ret = NULL;
        }

        if (!ret) {
                ret = ERR_PTR(err);
                goto out;
        }

        err = __get_mtd_device(ret);
        if (err)
                ret = ERR_PTR(err);
out:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}

int __get_mtd_device(struct mtd_info *mtd)
{
        int err;

        if (!try_module_get(mtd->owner))
                return -ENODEV;

        if (mtd->get_device) {
                err = mtd->get_device(mtd);

                if (err) {
                        module_put(mtd->owner);
                        return err;
                }
        }
        mtd->usecount++;
        return 0;
}

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns an MTD device description structure on success
 * and an error pointer (ERR_PTR) on failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
        int err = -ENODEV;
        struct mtd_info *mtd = NULL, *other;

        mutex_lock(&mtd_table_mutex);

        mtd_for_each_device(other) {
                if (!strcmp(name, other->name)) {
                        mtd = other;
                        break;
                }
        }

        if (!mtd)
                goto out_unlock;

        err = __get_mtd_device(mtd);
        if (err)
                goto out_unlock;

        mutex_unlock(&mtd_table_mutex);
        return mtd;

out_unlock:
        mutex_unlock(&mtd_table_mutex);
        return ERR_PTR(err);
}

void put_mtd_device(struct mtd_info *mtd)
{
        mutex_lock(&mtd_table_mutex);
        __put_mtd_device(mtd);
        mutex_unlock(&mtd_table_mutex);
}

void __put_mtd_device(struct mtd_info *mtd)
{
        --mtd->usecount;
        BUG_ON(mtd->usecount < 0);

        if (mtd->put_device)
                mtd->put_device(mtd);

        module_put(mtd->owner);
}
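
/*
 * Usage sketch (illustrative only, not part of this file): an in-kernel
 * user looks a device up, uses it, and drops the reference when done:
 *
 *      struct mtd_info *mtd = get_mtd_device_nm("rootfs");  // hypothetical name
 *      if (IS_ERR(mtd))
 *              return PTR_ERR(mtd);
 *      ...
 *      put_mtd_device(mtd);
 *
 * get_mtd_device(NULL, num) does the same lookup by device number.
 */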

/*
 * default_mtd_writev - default writev method for MTD devices that do not
 * implement their own.  Writes the vectors sequentially starting at @to
 * and returns the total number of bytes written via @retlen.
 */
int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
                       unsigned long count, loff_t to, size_t *retlen)
{
        unsigned long i;
        size_t totlen = 0, thislen;
        int ret = 0;

        if (!mtd->write) {
                ret = -EROFS;
        } else {
                for (i = 0; i < count; i++) {
                        if (!vecs[i].iov_len)
                                continue;
                        ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen,
                                         vecs[i].iov_base);
                        totlen += thislen;
                        if (ret || thislen != vecs[i].iov_len)
                                break;
                        to += vecs[i].iov_len;
                }
        }
        if (retlen)
                *retlen = totlen;
        return ret;
}
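
/*
 * Usage sketch (illustrative only, not part of this file): writing two
 * scattered buffers in one call, with hypothetical buffers hdr and data:
 *
 *      struct kvec vecs[2] = {
 *              { .iov_base = hdr,  .iov_len = hdr_len  },
 *              { .iov_base = data, .iov_len = data_len },
 *      };
 *      size_t written;
 *
 *      err = default_mtd_writev(mtd, vecs, 2, offset, &written);
 */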

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: the MTD device whose write size the buffer should be aligned to
 * @size: pointer to the ideal or maximum size of the allocation; on return
 *        it holds the size that was actually allocated
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned
 * to the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
        gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
                      __GFP_NORETRY | __GFP_NO_KSWAPD;
        size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
        void *kbuf;

        *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

        while (*size > min_alloc) {
                kbuf = kmalloc(*size, flags);
                if (kbuf)
                        return kbuf;

                *size >>= 1;
                *size = ALIGN(*size, mtd->writesize);
        }

        /*
         * For the last resort allocation allow 'kmalloc()' to do all sorts
         * of things (write-back, dropping caches, etc) by using GFP_KERNEL.
         */
        return kmalloc(*size, GFP_KERNEL);
}
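
/*
 * Usage sketch (illustrative only, not part of this file): callers pass in
 * the ideal size and read back the size actually obtained:
 *
 *      size_t len = mtd->erasesize;    // would like a whole erase block
 *      void *buf = mtd_kmalloc_up_to(mtd, &len);
 *      if (!buf)
 *              return -ENOMEM;
 *      // 'len' now holds the usable size (may be smaller than requested)
 *      ...
 *      kfree(buf);
 */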

EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static struct proc_dir_entry *proc_mtd;

static int mtd_proc_show(struct seq_file *m, void *v)
{
        struct mtd_info *mtd;

        seq_puts(m, "dev:    size   erasesize  name\n");
        mutex_lock(&mtd_table_mutex);
        mtd_for_each_device(mtd) {
                seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
                           mtd->index, (unsigned long long)mtd->size,
                           mtd->erasesize, mtd->name);
        }
        mutex_unlock(&mtd_table_mutex);
        return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
        .open           = mtd_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
        int ret;

        ret = bdi_init(bdi);
        if (!ret)
                ret = bdi_register(bdi, NULL, name);

        if (ret)
                bdi_destroy(bdi);

        return ret;
}

static int __init init_mtd(void)
{
        int ret;

        ret = class_register(&mtd_class);
        if (ret)
                goto err_reg;

        ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
        if (ret)
                goto err_bdi1;

        ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
        if (ret)
                goto err_bdi2;

        ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
        if (ret)
                goto err_bdi3;

#ifdef CONFIG_PROC_FS
        proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
        return 0;

err_bdi3:
        bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
        bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
        class_unregister(&mtd_class);
err_reg:
        pr_err("Error registering mtd class or bdi: %d\n", ret);
        return ret;
}

static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
#endif /* CONFIG_PROC_FS */
        class_unregister(&mtd_class);
        bdi_destroy(&mtd_bdi_unmappable);
        bdi_destroy(&mtd_bdi_ro_mappable);
        bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");