1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/err.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/stringify.h>
40#include <linux/namei.h>
41#include <linux/stat.h>
42#include <linux/miscdevice.h>
43#include <linux/log2.h>
44#include <linux/kthread.h>
45#include <linux/kernel.h>
46#include <linux/slab.h>
47#include "ubi.h"
48
49
/* Maximum length of the 'mtd=' parameter, including the trailing NUL */
#define MTD_PARAM_LEN_MAX 64

/* Evaluates to 1 when UBI is compiled as a loadable kernel module */
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - parsed "mtd=" module parameter.
 * @name: MTD device number, name, or character device node path (as given
 *        on the command line)
 * @vid_hdr_offs: VID header offset to use when attaching (0 means default)
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};

/* Number of "mtd=" parameters accepted so far */
static int __initdata mtd_devs;

/* Parsed "mtd=" parameters, one entry per requested attach */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device ('/dev/ubi_ctrl') */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in the system, indexed by UBI device number */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI device creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
96
97
/* "Show" method for the '/<sysfs>/class/ubi/version' attribute */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);
107
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/*
 * Per-UBI-device sysfs attributes; all read-only and served by
 * dev_attribute_show() (files in '/<sysfs>/class/ubi/ubiX/').
 */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
134
135
136
137
138
139
140
141
142
143
144
/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED etc.)
 *
 * Fills a &struct ubi_notification with the current device and volume
 * information and passes it down the @ubi_notifiers chain. Returns the
 * result of blocking_notifier_call_chain().
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
153
154
155
156
157
158
159
160
161
162
163
164
/**
 * ubi_notify_all - send a notification for each volume of a UBI device.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED etc.)
 * @nb: the notifier to call, or %NULL to invoke the whole @ubi_notifiers chain
 *
 * Walks every occupied volume slot and sends one notification per volume.
 * @ubi->device_mutex is held across the walk so the volume information
 * snapshot stays consistent. Returns the number of notifications sent.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/* Skip empty slots */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}
194
195
196
197
198
199
200
201
202
203
/**
 * ubi_enumerate_volumes - send "add" notifications for all existing volumes.
 * @nb: the notifier to call for every volume
 *
 * Walks all attached UBI devices and sends an %UBI_VOLUME_ADDED notification
 * to @nb for each of their volumes. Returns the number of notifications sent.
 *
 * NOTE(review): @ubi_devices is read here without @ubi_devices_lock —
 * presumably callers are serialized against attach/detach via
 * @ubi_devices_mutex; confirm at the call sites.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}
222
223
224
225
226
227
228
229
230
231
/**
 * ubi_get_device - get UBI device object.
 * @ubi_num: UBI device number
 *
 * Returns the UBI device description object for device number @ubi_num with
 * its reference count (and the embedded struct device refcount) increased,
 * or %NULL if the device does not exist. The caller must balance with
 * ubi_put_device().
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}
247
248
249
250
251
/**
 * ubi_put_device - drop a reference taken by ubi_get_device().
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}
259
260
261
262
263
264
265
266
267struct ubi_device *ubi_get_by_major(int major)
268{
269 int i;
270 struct ubi_device *ubi;
271
272 spin_lock(&ubi_devices_lock);
273 for (i = 0; i < UBI_MAX_DEVICES; i++) {
274 ubi = ubi_devices[i];
275 if (ubi && MAJOR(ubi->cdev.dev) == major) {
276 ubi_assert(ubi->ref_count >= 0);
277 ubi->ref_count += 1;
278 get_device(&ubi->dev);
279 spin_unlock(&ubi_devices_lock);
280 return ubi;
281 }
282 }
283 spin_unlock(&ubi_devices_lock);
284
285 return NULL;
286}
287
288
289
290
291
292
293
294
295
296int ubi_major2num(int major)
297{
298 int i, ubi_num = -ENODEV;
299
300 spin_lock(&ubi_devices_lock);
301 for (i = 0; i < UBI_MAX_DEVICES; i++) {
302 struct ubi_device *ubi = ubi_devices[i];
303
304 if (ubi && MAJOR(ubi->cdev.dev) == major) {
305 ubi_num = ubi->ubi_num;
306 break;
307 }
308 }
309 spin_unlock(&ubi_devices_lock);
310
311 return ubi_num;
312}
313
314
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * Re-acquire the device through ubi_get_device() instead of using the
	 * container pointer directly: the device may be in the middle of
	 * being removed, and taking a proper reference keeps @ubi alive for
	 * the duration of this read.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	/* Dispatch on the attribute's identity (pointer comparison) */
	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}
364
365static void dev_release(struct device *dev)
366{
367 struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
368
369 kfree(ubi);
370}
371
372
373
374
375
376
377
378
379
380
381static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
382{
383 int err;
384
385 ubi->dev.release = dev_release;
386 ubi->dev.devt = ubi->cdev.dev;
387 ubi->dev.class = ubi_class;
388 dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
389 err = device_register(&ubi->dev);
390 if (err)
391 return err;
392
393 *ref = 1;
394 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
395 if (err)
396 return err;
397 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
398 if (err)
399 return err;
400 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
401 if (err)
402 return err;
403 err = device_create_file(&ubi->dev, &dev_volumes_count);
404 if (err)
405 return err;
406 err = device_create_file(&ubi->dev, &dev_max_ec);
407 if (err)
408 return err;
409 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
410 if (err)
411 return err;
412 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
413 if (err)
414 return err;
415 err = device_create_file(&ubi->dev, &dev_max_vol_count);
416 if (err)
417 return err;
418 err = device_create_file(&ubi->dev, &dev_min_io_size);
419 if (err)
420 return err;
421 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
422 if (err)
423 return err;
424 err = device_create_file(&ubi->dev, &dev_mtd_num);
425 return err;
426}
427
428
429
430
431
432static void ubi_sysfs_close(struct ubi_device *ubi)
433{
434 device_remove_file(&ubi->dev, &dev_mtd_num);
435 device_remove_file(&ubi->dev, &dev_bgt_enabled);
436 device_remove_file(&ubi->dev, &dev_min_io_size);
437 device_remove_file(&ubi->dev, &dev_max_vol_count);
438 device_remove_file(&ubi->dev, &dev_bad_peb_count);
439 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
440 device_remove_file(&ubi->dev, &dev_max_ec);
441 device_remove_file(&ubi->dev, &dev_volumes_count);
442 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
443 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
444 device_remove_file(&ubi->dev, &dev_eraseblock_size);
445 device_unregister(&ubi->dev);
446}
447
448
449
450
451
452static void kill_volumes(struct ubi_device *ubi)
453{
454 int i;
455
456 for (i = 0; i < ubi->vtbl_slots; i++)
457 if (ubi->volumes[i])
458 ubi_free_volume(ubi, ubi->volumes[i]);
459}
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
/**
 * uif_init - initialize user interfaces for a UBI device.
 * @ubi: UBI device description object
 * @ref: set to 1 once the device object has been registered in sysfs (i.e.
 *       once failure cleanup must go through put_device(), not kfree())
 *
 * Allocates a character device region (one minor for the device itself plus
 * one per possible volume), registers the UBI character device, creates the
 * sysfs files, and adds all volumes found during scanning. Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Minor 0 is the UBI device itself; minors 1..vtbl_slots are the
	 * volumes, so reserve vtbl_slots + 1 minors in total.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	/*
	 * device_register() succeeded, so ubi_sysfs_close() will drop the
	 * last device reference; take an extra one here because the caller
	 * (ubi_attach_mtd_dev) also does a put_device() when *ref is set.
	 */
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
539
540
541
542
543
544
545
546
547
/**
 * uif_close - tear down user interfaces for a UBI device.
 * @ubi: UBI device description object
 *
 * Exact reverse of uif_init(): frees all volumes, removes the sysfs files,
 * deletes the character device and releases the reserved minor range.
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
555
556
557
558
559
560static void free_internal_volumes(struct ubi_device *ubi)
561{
562 int i;
563
564 for (i = ubi->vtbl_slots;
565 i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
566 kfree(ubi->volumes[i]->eba_tbl);
567 kfree(ubi->volumes[i]);
568 }
569}
570
571
572
573
574
575
576
577
578
579
580
581
582
/**
 * attach_by_scanning - attach an MTD device using full flash scanning.
 * @ubi: UBI device description object
 *
 * Scans the flash, records the PEB accounting results in @ubi, then
 * initializes the volume table, the wear-leveling subsystem and the EBA
 * subsystem from the scan info. Returns zero in case of success and a
 * negative error code in case of failure; on failure everything initialized
 * so far is unwound in reverse order.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = si->corr_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;
	ubi_msg("max. sequence number: %llu", si->max_sqnum);

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	/* The scan info is only needed during attach */
	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
/**
 * io_init - initialize I/O sub-system for a UBI device.
 * @ubi: UBI device description object
 *
 * Derives all flash geometry parameters (PEB size and count, minimal I/O
 * unit, header offsets/sizes, LEB start and size) from the underlying MTD
 * device and validates them. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	/* Eraseblocks of different sizes are not supported */
	if (ubi->mtd->numeraseregions != 0) {
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	/* Bad-block handling requires both detection and marking support */
	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	/* Headers may be written in sub-pages if the flash supports them */
	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * UBI assumes the minimal I/O unit is a power of 2 in several places
	 * (alignment masks below, for instance).
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;

	/* The write buffer must be a power-of-2 multiple of min_io_size */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size      %d", ubi->min_io_size);
	dbg_msg("max_write_size   %d", ubi->max_write_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset: right after the aligned EC header */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		/* User-specified offset: split into aligned base + shift */
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start        %d", ubi->leb_start);

	/* The shift must be 4-byte aligned */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity of the derived offsets */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEB are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * If the EC and VID headers share a minimal I/O unit, an unclean
	 * reboot during a header update could corrupt both; refuse to write
	 * in that configuration.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size:              %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset:          %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset:                %d", ubi->leb_start);

	return 0;
}
802
803
804
805
806
807
808
809
810
811
812
/**
 * autoresize - re-size a volume flagged for auto-resize.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * Grows the volume to take all available PEBs and clears the
 * %UBI_VTBL_AUTORESIZE_FLG flag so the resize happens only once. If no PEBs
 * are available, only the flag is cleared on flash. Returns zero in case of
 * success and a negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the flag in the in-memory copy first; the flash copy is
	 * updated either by ubi_change_vtbl_record() below or as a side
	 * effect of the resize.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/* No room to grow — just persist the cleared flag */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device, or %UBI_DEV_NUM_AUTO to
 *           pick the first free number
 * @vid_hdr_offset: VID header offset (0 means default)
 *
 * Creates a new UBI device for @mtd, scans the flash, brings up the user
 * interfaces and starts the background thread. Returns the new UBI device
 * number in case of success and a negative error code in case of failure.
 *
 * NOTE(review): @ubi_devices is walked here without @ubi_devices_lock —
 * presumably callers serialize via @ubi_devices_mutex; confirm at call sites.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	/* Refuse to attach the same MTD device twice */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Attaching a gluebi-emulated MTD device (one that sits on top of a
	 * UBI volume) would recurse; refuse it.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* The requested number must be free */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
	dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
	dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	err = io_init(ubi);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name:            \"%s\"", mtd->name);
	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
	ubi_msg("number of corrupted PEBs:   %d", ubi->corr_peb_count);
	ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes:     %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs:             %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
	ubi_msg("image sequence number:  %d", ubi->image_seq);

	/* Enable and wake up the background thread under @wl_lock */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
	/*
	 * If the device object was registered (ref set by uif_init), @ubi is
	 * freed by dev_release() on the last put; otherwise free it here.
	 */
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach even if the device is busy (non-zero reference count)
 *
 * Stops the background thread, tears down the user interfaces and frees all
 * resources of UBI device @ubi_num. Returns zero in case of success,
 * %-EBUSY if the device is still referenced and @anyway is zero, and
 * %-EINVAL if the device does not exist.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	/* Drop the reference taken by ubi_get_device() above */
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may happen if e.g. the background thread got stuck */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/* Stop the background thread before destroying anything it uses */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Keep the device object alive across the teardown below; the final
	 * put_device() at the end releases it (triggering dev_release()).
	 */
	get_device(&ubi->dev);

	uif_close(ubi);
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path (e.g. "/dev/mtd0")
 *
 * Resolves @mtd_dev to a device node, validates that it is an MTD character
 * device, and returns the MTD device description object, or a negative
 * error code pointer in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * Odd minors correspond to the read-only "mtdXro" nodes —
		 * refuse those, UBI needs the writable node.
		 */
		return ERR_PTR(-EINVAL);

	/* Even minor N maps to MTD device number N / 2 */
	return get_mtd_device(NULL, minor / 2);
}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
1142{
1143 struct mtd_info *mtd;
1144 int mtd_num;
1145 char *endp;
1146
1147 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
1148 if (*endp != '\0' || mtd_dev == endp) {
1149
1150
1151
1152
1153 mtd = get_mtd_device_nm(mtd_dev);
1154 if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
1155
1156 mtd = open_mtd_by_chdev(mtd_dev);
1157 } else
1158 mtd = get_mtd_device(NULL, mtd_num);
1159
1160 return mtd;
1161}
1162
1163static int __init ubi_init(void)
1164{
1165 int err, i, k;
1166
1167
1168 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
1169 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
1170
1171 if (mtd_devs > UBI_MAX_DEVICES) {
1172 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
1173 return -EINVAL;
1174 }
1175
1176
1177 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
1178 if (IS_ERR(ubi_class)) {
1179 err = PTR_ERR(ubi_class);
1180 ubi_err("cannot create UBI class");
1181 goto out;
1182 }
1183
1184 err = class_create_file(ubi_class, &ubi_version);
1185 if (err) {
1186 ubi_err("cannot create sysfs file");
1187 goto out_class;
1188 }
1189
1190 err = misc_register(&ubi_ctrl_cdev);
1191 if (err) {
1192 ubi_err("cannot register device");
1193 goto out_version;
1194 }
1195
1196 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
1197 sizeof(struct ubi_wl_entry),
1198 0, 0, NULL);
1199 if (!ubi_wl_entry_slab)
1200 goto out_dev_unreg;
1201
1202
1203 for (i = 0; i < mtd_devs; i++) {
1204 struct mtd_dev_param *p = &mtd_dev_param[i];
1205 struct mtd_info *mtd;
1206
1207 cond_resched();
1208
1209 mtd = open_mtd_device(p->name);
1210 if (IS_ERR(mtd)) {
1211 err = PTR_ERR(mtd);
1212 goto out_detach;
1213 }
1214
1215 mutex_lock(&ubi_devices_mutex);
1216 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
1217 p->vid_hdr_offs);
1218 mutex_unlock(&ubi_devices_mutex);
1219 if (err < 0) {
1220 ubi_err("cannot attach mtd%d", mtd->index);
1221 put_mtd_device(mtd);
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236 if (ubi_is_module())
1237 goto out_detach;
1238 }
1239 }
1240
1241 return 0;
1242
1243out_detach:
1244 for (k = 0; k < i; k++)
1245 if (ubi_devices[k]) {
1246 mutex_lock(&ubi_devices_mutex);
1247 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1248 mutex_unlock(&ubi_devices_mutex);
1249 }
1250 kmem_cache_destroy(ubi_wl_entry_slab);
1251out_dev_unreg:
1252 misc_deregister(&ubi_ctrl_cdev);
1253out_version:
1254 class_remove_file(ubi_class, &ubi_version);
1255out_class:
1256 class_destroy(ubi_class);
1257out:
1258 ubi_err("UBI error: cannot initialize UBI, error %d", err);
1259 return err;
1260}
1261module_init(ubi_init);
1262
1263static void __exit ubi_exit(void)
1264{
1265 int i;
1266
1267 for (i = 0; i < UBI_MAX_DEVICES; i++)
1268 if (ubi_devices[i]) {
1269 mutex_lock(&ubi_devices_mutex);
1270 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1271 mutex_unlock(&ubi_devices_mutex);
1272 }
1273 kmem_cache_destroy(ubi_wl_entry_slab);
1274 misc_deregister(&ubi_ctrl_cdev);
1275 class_remove_file(ubi_class, &ubi_version);
1276 class_destroy(ubi_class);
1277}
1278module_exit(ubi_exit);
1279
1280
1281
1282
1283
1284
1285
1286
1287static int __init bytes_str_to_int(const char *str)
1288{
1289 char *endp;
1290 unsigned long result;
1291
1292 result = simple_strtoul(str, &endp, 0);
1293 if (str == endp || result >= INT_MAX) {
1294 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1295 str);
1296 return -EINVAL;
1297 }
1298
1299 switch (*endp) {
1300 case 'G':
1301 result *= 1024;
1302 case 'M':
1303 result *= 1024;
1304 case 'K':
1305 result *= 1024;
1306 if (endp[1] == 'i' && endp[2] == 'B')
1307 endp += 2;
1308 case '\0':
1309 break;
1310 default:
1311 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1312 str);
1313 return -EINVAL;
1314 }
1315
1316 return result;
1317}
1318
1319
1320
1321
1322
1323
1324
1325
1326
/**
 * ubi_mtd_param_parse - parse one "mtd=" module parameter.
 * @val: the parameter value: "<name|num|path>[,<vid_hdr_offs>]"
 * @kp: unused kernel parameter description (required by module_param_call)
 *
 * Validates the string, splits it on ',' and records the result in the next
 * free @mtd_dev_param slot. Returns zero in case of success and a negative
 * error code in case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[2] = {NULL, NULL};

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* len == MTD_PARAM_LEN_MAX means @val did not fit (no NUL seen) */
	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
		       "ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	/* Split into at most two comma-separated tokens: name, offset */
	for (i = 0; i < 2; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Anything left after two tokens is an error */
	if (pbuf) {
		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
		       val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	/* bytes_str_to_int() reports errors as negative values */
	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	mtd_devs += 1;
	return 0;
}
1384
/* "mtd=" is parse-only (mode 000): it has no readable sysfs representation */
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
		      "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or "
		      "path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
		      "header position to be used by UBI.\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device "
		      "/dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
		      "with name \"content\" using VID header offset 1984, and "
		      "MTD device number 4 with default VID header offset.");

MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
1403