/*
 * UBI "build" unit: initialization of the UBI module, attaching and
 * detaching MTD devices, and creation of UBI devices together with their
 * character device and sysfs interfaces.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as
 * module load parameters or kernel boot parameters. If no MTD devices were
 * specified, UBI does not attach anything, but it is possible to do that
 * later using the "UBI control device".
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device
 *        number string
 * @ubi_num: UBI number
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI fastmap module parameters */
static bool fm_autoconvert;
static bool fm_debug;
#endif

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in the system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI device creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_class_attrs[] = {
	__ATTR(version, S_IRUGO, ubi_version_show, NULL),
	__ATTR_NULL
};

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
	.name = UBI_NAME_STR,
	.owner = THIS_MODULE,
	.class_attrs = ubi_class_attrs,
};

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming). For volume-layout
 * changing events a new fastmap is written first. Returns the result of the
 * notifier chain invocation.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification for each volume of a UBI device.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call, or %NULL to call the whole notifier chain
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each of them. Returns the number of sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Volumes cannot disappear while @ubi->device_mutex is held,
		 * but a slot may simply be unused, so skip empty slots.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call, or %NULL to call the whole notifier chain
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. Returns the number of sent
 * notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Walk the whole @ubi_devices array; slots which are not in use are
	 * simply skipped.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns the UBI device description object for UBI device
 * number @ubi_num, or %NULL if the device does not exist. It increases the
 * device reference count to prevent removal of the device while it is used.
 * The reference is dropped by 'ubi_put_device()'.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop a UBI device reference taken by 'ubi_get_device()'.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for a UBI device by its major number. If the UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The UBI device may have been removed while this sysfs file was
	 * being read. 'container_of()' only gives us the embedded object, so
	 * re-look the device up via 'ubi_get_device()', which takes a
	 * reference if (and only if) the device still exists. The
	 * 'struct ubi_device' itself is freed in the release function, so
	 * using 'ubi->ubi_num' here is safe either way.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else if (attr == &dev_ro_mode)
		ret = sprintf(buf, "%d\n", ubi->ro_mode);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static struct attribute *ubi_dev_attrs[] = {
	&dev_eraseblock_size.attr,
	&dev_avail_eraseblocks.attr,
	&dev_total_eraseblocks.attr,
	&dev_volumes_count.attr,
	&dev_max_ec.attr,
	&dev_reserved_for_bad.attr,
	&dev_bad_peb_count.attr,
	&dev_max_vol_count.attr,
	&dev_min_io_size.attr,
	&dev_bgt_enabled.attr,
	&dev_mtd_num.attr,
	&dev_ro_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(ubi_dev);

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for a UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 if a reference to @ubi->dev has been taken (i.e.
 *       'device_register()' succeeded); the caller must then put the device
 *       instead of freeing @ubi directly
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	return 0;
}

/**
 * ubi_sysfs_close - close sysfs for a UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for a UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev has
 *       been taken, in which case the caller has to put @ubi->dev instead of
 *       freeing @ubi
 *
 * This function initializes the user interfaces (character devices and
 * sysfs) for UBI device @ubi. If it fails before the device was registered
 * with the driver core, everything is cleaned up and @ref stays %0, so the
 * caller may simply free @ubi. If it fails after 'device_register()'
 * succeeded, @ref is set to %1 and the caller must drop the reference with
 * 'put_device()' so that 'dev_release()' frees @ubi.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Reserve one character device region per UBI device: minor 0 is the
	 * UBI character device itself, minors 1..@vtbl_slots are used by the
	 * volume character devices.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err(ubi, "cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for a UBI device.
 * @ubi: UBI device description object
 *
 * This undoes what 'uif_init()' did: it destroys the user volumes, the sysfs
 * interface, and the UBI character device region.
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		ubi_eba_replace_table(ubi->volumes[i], NULL);
		kfree(ubi->volumes[i]);
	}
}

/**
 * get_bad_peb_limit - compute the maximum number of bad PEBs to expect.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * Returns the number of physical eraseblocks to reserve for bad eraseblock
 * handling, or zero if @max_beb_per1024 is zero.
 */
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Use the size of the whole flash chip, not just the size of the MTD
	 * partition being attached: bad eraseblocks are specified for the
	 * entire chip and are not evenly distributed, so in the worst case
	 * all of them may end up inside this partition.
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}

/**
 * io_init - initialize the I/O sub-system for a UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset is zero, default offsets are assumed:
 *   o the EC header is always at offset zero - this cannot be changed;
 *   o the VID header starts just after the EC header, at the next
 *     header-aligned (sub-page) boundary;
 *   o data starts just after the VID header.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions with different
		 * eraseblock sizes. UBI assumes a uniform eraseblock size and
		 * does not support such devices.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/* Cache frequently used MTD device geometry */
	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure the minimal I/O unit is a power of 2. This is not a
	 * fundamental requirement, but it allows cheaper alignment
	 * arithmetic everywhere else.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * The write buffer size must be at least the minimal I/O unit, a
	 * multiple of it, and a power of 2.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size %d", ubi->min_io_size);
	dbg_gen("max_write_size %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set the maximum amount of erroneous PEBs (PEBs with read errors)
	 * to about 10% of the device, but never less than 16.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that the EC and VID headers end up in the same
	 * minimal I/O unit. In this case UBI can only accept the image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, @ubi->bad_peb_count is not initialized here because MTD does
	 * not provide this information directly; it is filled in later by
	 * 'ubi_attach()' while scanning the device.
	 */
	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. Returns zero in case of
 * success and a negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the in-memory copy of the volume
	 * table; 'ubi_resize_volume()' will propagate this change to the
	 * flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, just clear the
		 * flag on the flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
 * case a vacant device number is picked automatically. Returns the new UBI
 * device number in case of success and a negative error code in case of
 * failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI device creations and deletions
	 * are serialized, so it does not have to be protected by
	 * @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			pr_err("ubi: mtd%d is already attached to ubi%d",
			       mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of a UBI volume
	 * already. Attaching such a device would make the UBI module take a
	 * reference to itself (by opening the emulated MTD device), which
	 * makes it impossible to unload the module, and it makes no sense
	 * anyway, so refuse it.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
		       mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot, i.e. free UBI device number */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			pr_err("ubi: only %d UBI devices may be created",
			       UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			pr_err("ubi: ubi%i already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
		UBI_FM_MIN_POOL_SIZE);

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
	ubi->fm_disabled = !fm_autoconvert;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "default fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	init_rwsem(&ubi->fm_protect);
	init_rwsem(&ubi->fm_eba_sem);

	ubi_msg(ubi, "attaching mtd%d", mtd->index);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err(ubi, "failed to attach mtd%d, error %d",
			mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	/* Make the device "available" before it becomes accessible via sysfs */
	ubi_devices[ubi_num] = ubi;

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err(ubi, "cannot spawn \"%s\", error %d",
			ubi->bgt_name, err);
		goto out_debugfs;
	}

	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
		mtd->index, mtd->name, ubi->flash_size >> 20);
	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	get_device(&ubi->dev);
	ubi_assert(ref);
	uif_close(ubi);
out_detach:
	ubi_devices[ubi_num] = NULL;
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if the device reference count is not zero
 *
 * This function destroys UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the UBI
 * device is busy and cannot be destroyed, and %-EINVAL if it does not exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err(ubi, "%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/*
	 * If we do not write a new fastmap at detach time, all erase-counter
	 * updates made since the last written fastmap are lost. In fastmap
	 * debugging mode the update is omitted to simulate an unclean
	 * shutdown.
	 */
	if (!ubi_dbg_chk_fastmap(ubi))
		ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, stop the background thread to prevent it
	 * from touching this device while it is being torn down.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object while it is still used below.
	 */
	get_device(&ubi->dev);

	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node
 * path. Returns the MTD device description object in case of success and a
 * negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, minor;
	struct path path;
	struct kstat stat;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	err = vfs_getattr(&path, &stat);
	path_put(&path);
	if (err)
		return ERR_PTR(err);

	/* The MTD device number is defined by the major / minor numbers */
	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
		return ERR_PTR(-EINVAL);

	minor = MINOR(stat.rdev);

	if (minor & 1)
		/*
		 * Odd minors correspond to the read-only "/dev/mtdrX"
		 * devices, which are not supported here.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open an MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open the MTD device described by the @mtd_dev
 * string: it is first treated as an ASCII MTD device number, then as an MTD
 * device name, and finally as an MTD character device node path. Returns the
 * MTD device description object in case of success and a negative error code
 * in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * an MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		pr_err("UBI error: too many MTD devices, maximum is %d",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create the base sysfs directory and sysfs files */
	err = class_register(&ubi_class);
	if (err < 0)
		return err;

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		pr_err("UBI error: cannot register device");
		goto out;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;

	/* Attach the MTD devices given via the 'mtd=' parameters */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			pr_err("UBI error: cannot open mtd %s, error %d",
			       p->name, err);
			/* See the comment below about ubi_is_module() */
			if (ubi_is_module())
				goto out_detach;
			continue;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d",
			       mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line: a UBI failure stopped the
			 * whole boot sequence.
			 *
			 * To fix this, the behavior was changed for the
			 * non-module case, but the old behavior was preserved
			 * for the module case, just for compatibility. This
			 * is a little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	err = ubiblock_init();
	if (err) {
		pr_err("UBI error: block: cannot initialize, error %d", err);

		/* See the comment above about ubi_is_module() */
		if (ubi_is_module())
			goto out_detach;
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out:
	class_unregister(&ubi_class);
	pr_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	ubiblock_exit();

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_unregister(&ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number-of-bytes string into an integer.
 * @str: the string to convert
 *
 * This function parses @str, which may carry a 'K', 'M' or 'G' suffix (or the
 * "KiB"/"MiB"/"GiB" forms). It returns the resulting positive integer in case
 * of success and a negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	return result;
}

/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT], *token;

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		pr_err("UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
		       val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		pr_err("UBI error: too many arguments at \"%s\"\n", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	token = tokens[1];
	if (token) {
		p->vid_hdr_offs = bytes_str_to_int(token);

		if (p->vid_hdr_offs < 0)
			return p->vid_hdr_offs;
	}

	token = tokens[2];
	if (token) {
		int err = kstrtoint(token, 10, &p->max_beb_per1024);

		if (err) {
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
			       token);
			return -EINVAL;
		}
	}

	token = tokens[3];
	if (token) {
		int err = kstrtoint(token, 10, &p->ubi_num);

		if (err) {
			pr_err("UBI error: bad value for ubi_num parameter: %s",
			       token);
			return -EINVAL;
		}
	} else
		p->ubi_num = UBI_DEV_NUM_AUTO;

	mtd_devs += 1;
	return 0;
}

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position to be used by UBI. (default value if 0)\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default)\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).\n"
		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and use default values for the other fields.");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");