/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to
 * do that later using the "UBI control device".
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 3

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, %UBI_VOLUME_REMOVED,
 *         %UBI_VOLUME_RESIZED, or %UBI_VOLUME_RENAMED)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

#ifdef CONFIG_MTD_UBI_FASTMAP
	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		if (ubi_update_fastmap(ubi)) {
			ubi_err("Unable to update fastmap!");
			ubi_ro_mode(ubi);
		}
	}
#endif
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, %UBI_VOLUME_REMOVED,
 *         %UBI_VOLUME_RESIZED, or %UBI_VOLUME_RENAMED)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device_mutex is locked, and we are not going
		 * to change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}
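
/*
 * Illustrative sketch (not code from this file): a client such as gluebi
 * subscribes with 'ubi_register_volume_notifier()' and receives a pointer to
 * a 'struct ubi_notification' as the notifier data:
 *
 *	static int my_notify(struct notifier_block *nb, unsigned long ntype,
 *			     void *ns_ptr)
 *	{
 *		struct ubi_notification *nt = ns_ptr;
 *
 *		if (ntype == UBI_VOLUME_ADDED)
 *			pr_info("ubi%d volume %d added\n",
 *				nt->di.ubi_num, nt->vi.vol_id);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 * When registered with @ignore_existing set to zero, the already-existing
 * volumes are replayed as %UBI_VOLUME_ADDED events via
 * 'ubi_enumerate_volumes()' above.
 */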

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}
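
/*
 * Typical usage pattern (a sketch, not code from this file): every successful
 * 'ubi_get_device()' call must be balanced by exactly one 'ubi_put_device()'
 * call once the caller is done with the device:
 *
 *	struct ubi_device *ubi = ubi_get_device(0);
 *
 *	if (ubi) {
 *		pr_info("%s has %d PEBs\n", ubi->ubi_name, ubi->peb_count);
 *		ubi_put_device(ubi);
 *	}
 *
 * While the reference is held, 'ubi_detach_mtd_dev()' refuses to destroy the
 * device (unless it is invoked with @anyway set).
 */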

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return %NULL and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
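
/*
 * Example of the resulting numbering (illustrative values): if ubi0 is
 * dynamically assigned major 250, then /dev/ubi0 is (250, 0) and the volume
 * character devices /dev/ubi0_0, /dev/ubi0_1, ... are (250, 1), (250, 2),
 * and so on, one minor per volume table slot.
 */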

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects
 * (@vol->dev), the memory allocated for the volumes is freed as well (in the
 * release function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using size of the entire flash chip and not just the
	 * MTD partition size because the maximum number of bad eraseblocks is
	 * a percentage of the whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case is that all the
	 * bad eraseblocks of the chip are in the MTD partition we are
	 * attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
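
/*
 * Worked example with made-up numbers: for a 1 GiB chip with 256 KiB
 * eraseblocks, device_pebs = 4096. With max_beb_per1024 = 20 this gives
 * limit = mult_frac(4096, 20, 1024) = 80, and since
 * mult_frac(80, 1024, 20) = 4096 is not below device_pebs, no round-up is
 * needed: up to 80 bad PEBs are budgeted for.
 */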

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption, it is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEB are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far, which means we would not reserve enough PEBs
	 * for bad eraseblocks handling.
	 */

	return 0;
}
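
/*
 * Layout example with illustrative numbers: on a NAND with 128 KiB PEBs,
 * 2048-byte pages and 512-byte sub-pages, min_io_size = 2048 and
 * hdrs_min_io_size = 512. The EC header sits at offset 0, so the default VID
 * header offset is ec_hdr_alsize = ALIGN(64, 512) = 512. Data then starts at
 * leb_start = ALIGN(512 + 64, 2048) = 2048, which leaves
 * leb_size = 131072 - 2048 = 129024 bytes per LEB.
 */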

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn("skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the auto
		 * resize flag in the volume table record.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not have to be protected by
	 * @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			ubi_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
	ubi->fm_disabled = !fm_autoconvert;

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	mutex_init(&ubi->fm_mutex);
	init_rwsem(&ubi->fm_sem);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_debugging_init_dev(ubi);
	if (err)
		goto out_free;

	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
		goto out_debugging;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_debugfs;
	}

	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
	ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	get_device(&ubi->dev);
	ubi_assert(ref);
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_debugging:
	ubi_debugging_exit_dev(ubi);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}
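
/*
 * For reference (a sketch of the call path, outside this file): user space
 * reaches this function through the /dev/ubi_ctrl character device. The
 * ubiattach tool issues the UBI_IOCATT ioctl with a 'struct ubi_attach_req'
 * carrying mtd_num, ubi_num, vid_hdr_offset and max_beb_per1024, and the
 * ioctl handler in cdev.c calls ubi_attach_mtd_dev() under
 * @ubi_devices_mutex.
 */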

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the UBI
 * device is busy and cannot be destroyed, and %-EINVAL if it does not exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/* If we don't write a new fastmap at detach time we lose all
	 * EC updates that have been made since the last written fastmap. */
	ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	ubi_debugging_exit_dev(ubi);
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character node device path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * Just do not think the "/dev/mtdrX" devices may be supported,
		 * so we do not support them to avoid doing extra work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number string
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if it is not true,
 * it is treated as an MTD device name, and if that is also not true, it is
 * treated as an MTD character device node path. Returns MTD device description
 * object in case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			ubi_err("cannot attach mtd%d", mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("cannot initialize UBI, error %d", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns positive resulting integer in case of success and a
 * negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		ubi_err("incorrect bytes count: \"%s\"", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		ubi_err("incorrect bytes count: \"%s\"", str);
		return -EINVAL;
	}

	return result;
}
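
/*
 * A few sample conversions (illustrative): "512" -> 512, "4KiB" -> 4096,
 * "1M" -> 1048576, "1G" -> 1073741824. The deliberate switch fall-through
 * above is what makes each suffix multiply by 1024 once for every level
 * below it.
 */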

/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT];

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		ubi_err("too many parameters, max. is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		ubi_err("parameter \"%s\" is too long, max. is %d",
			val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	/* 'strsep()' modifies the string, so work on a copy */
	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		ubi_err("too many arguments at \"%s\"", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	if (tokens[2]) {
		int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);

		if (err) {
			ubi_err("bad value for max_beb_per1024 parameter: %s",
				tokens[2]);
			return -EINVAL;
		}
	}

	mtd_devs += 1;
	return 0;
}

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
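/*
 * Note (illustrative, not from this file): when UBI is built into the kernel,
 * the same values are passed on the kernel command line with the module name
 * as a prefix, e.g. "ubi.mtd=0 ubi.mtd=content,1984".
 */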
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");