1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#ifdef UBI_LINUX
37#include <linux/err.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/stringify.h>
41#include <linux/stat.h>
42#include <linux/miscdevice.h>
43#include <linux/log2.h>
44#include <linux/kthread.h>
45#endif
46#include <ubi_uboot.h>
47#include "ubi.h"
48
49#if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
50#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
51#endif
52
53
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD device name or number string
 * @vid_hdr_offs: VID header offset
 */
struct mtd_dev_param
{
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};
66
67
/* Number of elements set in the @mtd_dev_param array */
static int mtd_devs = 0;

/* MTD devices specification parameters, filled by ubi_mtd_param_parse() */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

#ifdef UBI_LINUX

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};
#endif

/* All UBI devices in the system */
struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

#ifdef UBI_LINUX

/* Serializes UBI device creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
97
98
/* "Show" method for files in '/<sysfs>/class/ubi/' (the UBI version) */
static ssize_t ubi_version_show(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}
103
104
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (files in '/<sysfs>/class/ubi/ubiX'), all read-only
 * and all dispatched through dev_attribute_show() */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
#endif
135
136
137
138
139
140
141
142
143
144
145struct ubi_device *ubi_get_device(int ubi_num)
146{
147 struct ubi_device *ubi;
148
149 spin_lock(&ubi_devices_lock);
150 ubi = ubi_devices[ubi_num];
151 if (ubi) {
152 ubi_assert(ubi->ref_count >= 0);
153 ubi->ref_count += 1;
154 get_device(&ubi->dev);
155 }
156 spin_unlock(&ubi_devices_lock);
157
158 return ubi;
159}
160
161
162
163
164
165void ubi_put_device(struct ubi_device *ubi)
166{
167 spin_lock(&ubi_devices_lock);
168 ubi->ref_count -= 1;
169 put_device(&ubi->dev);
170 spin_unlock(&ubi_devices_lock);
171}
172
173
174
175
176
177
178
179
180
181struct ubi_device *ubi_get_by_major(int major)
182{
183 int i;
184 struct ubi_device *ubi;
185
186 spin_lock(&ubi_devices_lock);
187 for (i = 0; i < UBI_MAX_DEVICES; i++) {
188 ubi = ubi_devices[i];
189 if (ubi && MAJOR(ubi->cdev.dev) == major) {
190 ubi_assert(ubi->ref_count >= 0);
191 ubi->ref_count += 1;
192 get_device(&ubi->dev);
193 spin_unlock(&ubi_devices_lock);
194 return ubi;
195 }
196 }
197 spin_unlock(&ubi_devices_lock);
198
199 return NULL;
200}
201
202
203
204
205
206
207
208
209
210int ubi_major2num(int major)
211{
212 int i, ubi_num = -ENODEV;
213
214 spin_lock(&ubi_devices_lock);
215 for (i = 0; i < UBI_MAX_DEVICES; i++) {
216 struct ubi_device *ubi = ubi_devices[i];
217
218 if (ubi && MAJOR(ubi->cdev.dev) == major) {
219 ubi_num = ubi->ubi_num;
220 break;
221 }
222 }
223 spin_unlock(&ubi_devices_lock);
224
225 return ubi_num;
226}
227
228#ifdef UBI_LINUX
229
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below looks weird, but makes sense. We get the UBI device
	 * description object from the contained 'struct ubi_device', but it
	 * is unclear whether the device is being removed concurrently. So we
	 * re-look it up via 'ubi_get_device()', which takes a reference under
	 * @ubi_devices_lock; if the device was already removed from
	 * @ubi_devices, it returns NULL and we fail with -ENODEV instead of
	 * touching freed memory.
	 *
	 * Note: 'struct ubi_device' is freed in the release function, so
	 * reading 'ubi->ubi_num' here is still safe.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	/* Dispatch on the attribute object's identity */
	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		/* Internal volumes are not reported to user-space */
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}
279
280
281static void dev_release(struct device *dev) { }
282
283
284
285
286
287
288
289
290static int ubi_sysfs_init(struct ubi_device *ubi)
291{
292 int err;
293
294 ubi->dev.release = dev_release;
295 ubi->dev.devt = ubi->cdev.dev;
296 ubi->dev.class = ubi_class;
297 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
298 err = device_register(&ubi->dev);
299 if (err)
300 return err;
301
302 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
303 if (err)
304 return err;
305 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
306 if (err)
307 return err;
308 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
309 if (err)
310 return err;
311 err = device_create_file(&ubi->dev, &dev_volumes_count);
312 if (err)
313 return err;
314 err = device_create_file(&ubi->dev, &dev_max_ec);
315 if (err)
316 return err;
317 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
318 if (err)
319 return err;
320 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
321 if (err)
322 return err;
323 err = device_create_file(&ubi->dev, &dev_max_vol_count);
324 if (err)
325 return err;
326 err = device_create_file(&ubi->dev, &dev_min_io_size);
327 if (err)
328 return err;
329 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
330 if (err)
331 return err;
332 err = device_create_file(&ubi->dev, &dev_mtd_num);
333 return err;
334}
335
336
337
338
339
340static void ubi_sysfs_close(struct ubi_device *ubi)
341{
342 device_remove_file(&ubi->dev, &dev_mtd_num);
343 device_remove_file(&ubi->dev, &dev_bgt_enabled);
344 device_remove_file(&ubi->dev, &dev_min_io_size);
345 device_remove_file(&ubi->dev, &dev_max_vol_count);
346 device_remove_file(&ubi->dev, &dev_bad_peb_count);
347 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
348 device_remove_file(&ubi->dev, &dev_max_ec);
349 device_remove_file(&ubi->dev, &dev_volumes_count);
350 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
351 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
352 device_remove_file(&ubi->dev, &dev_eraseblock_size);
353 device_unregister(&ubi->dev);
354}
355#endif
356
357
358
359
360
361static void kill_volumes(struct ubi_device *ubi)
362{
363 int i;
364
365 for (i = 0; i < ubi->vtbl_slots; i++)
366 if (ubi->volumes[i])
367 ubi_free_volume(ubi, ubi->volumes[i]);
368}
369
370
371
372
373
374
375
376
/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Allocates the character device region, registers the UBI character
 * device, creates the sysfs files and adds one interface per existing
 * volume. Returns zero in case of success and a negative error code in
 * case of failure; on failure everything set up so far is torn down.
 */
static int uif_init(struct ubi_device *ubi)
{
	int i, err;
#ifdef UBI_LINUX
	dev_t dev;
#endif

	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for UBI character devices are allocated dynamically.
	 * Volume character devices share the major of their UBI device: the
	 * UBI device itself gets minor 0 and volume minors start from 1.
	 * Hence one major and 'vtbl_slots + 1' minors are reserved here.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi);
	if (err)
		goto out_sysfs;

	/* Create one user interface per existing volume */
	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
436
437
438
439
440
/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Tears down everything set up by uif_init(), in reverse order of creation.
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
448
449
450
451
452
453
454
455
456
457
458
459
460
/**
 * attach_by_scanning - attach an MTD device using the scanning method.
 * @ubi: UBI device description object
 *
 * Scans the whole MTD device, then initializes the volume table, the
 * wear-leveling sub-system and the EBA sub-system from the scanning
 * information. Returns zero in case of success and a negative error code
 * in case of failure. The scanning information is freed in all cases.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
/**
 * io_init - initialize the I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset is zero, default offsets are assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header, aligned to the minimal
 *     I/O unit size of the "headers" area of the flash;
 *   o data starts just after the VID header, aligned to the full minimal
 *     I/O unit size.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions with different
		 * eraseblock sizes; UBI only supports one uniform region.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/* Take the flash geometry from the MTD layer */
	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	/* Bad-block handling is possible only if the driver has both hooks */
	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	ubi->min_io_size = ubi->mtd->writesize;
	/* Headers may be written at sub-page granularity if supported */
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure the minimal I/O unit is a power of 2. There is no
	 * fundamental reason for this assumption; it just allows cheap
	 * mask-based alignment instead of divisions.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	/* Aligned on-flash sizes of the EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size %d", ubi->min_io_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset: right after the aligned EC header */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		/* User-specified offset: split into aligned part + shift */
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Data area starts after the VID header, min_io_size aligned */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start %d", ubi->leb_start);

	/* The shift must be 4-byte aligned */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Sanity-check the resulting offsets against the PEB geometry */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * If the EC and VID headers share one minimal I/O unit, an unclean
	 * reboot while writing one could corrupt the other, so UBI cannot
	 * safely write - switch to read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size:              %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset:          %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset:                %d", ubi->leb_start);

	/*
	 * Note, ideally we would check if the VID header fits before the
	 * data area on all PEBs here, but that would require reading the
	 * whole flash, which is done later by the scanning code anyway.
	 */
	return 0;
}
649
650
651
652
653
654
655
656
657
658
659
/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG
 * flag in the volume table to the largest possible size (all currently
 * available PEBs), and clears the flag so the resize happens only once.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the in-RAM copy of the volume table
	 * first; ubi_resize_volume() / ubi_change_vtbl_record() below write
	 * the table back to flash with the flag cleared.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to grow the volume - just flush the
		 * cleared auto-resize flag to the on-flash volume table.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to
 * the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case a vacant device number is found and assigned automatically.
 * Returns the new UBI device number in case of success and a negative
 * error code in case of failure.
 *
 * NOTE(review): the @ubi_devices array is not locked here - callers appear
 * to serialize invocations with @ubi_devices_mutex (see ubi_init()); verify
 * for any new call site.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err;

	/* Refuse to attach the same MTD device twice */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Refuse MTD devices which are emulated on top of UBI volumes:
	 * attaching UBI on top of itself would only cause trouble.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure the requested device number is vacant */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;	/* -1 means "no auto-resize volume" */

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->volumes_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi);
	if (err)
		goto out_free;

	/* Allocate the two PEB-sized scratch buffers */
	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

#ifdef CONFIG_MTD_UBI_DEBUG
	mutex_init(&ubi->dbg_buf_mutex);
	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->dbg_peb_buf)
		goto out_free;
#endif

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	/* If scanning found an auto-resize volume, grow it now */
	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name:            \"%s\"", mtd->name);
	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
	ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes:     %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs:             %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);

	/* Enable the background thread unless debugging disabled it */
	if (!DBG_DISABLE_BGT) {
		ubi->thread_enabled = 1;
		wake_up_process(ubi->bgt_thread);
	}

	ubi_devices[ubi_num] = ubi;
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_eba_close(ubi);
	ubi_wl_close(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG
	vfree(ubi->dbg_peb_buf);
#endif
	kfree(ubi);
	return err;
}
871
872
873
874
875
876
877
878
879
880
881
882
883
884
/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach the MTD device even if device reference count is not zero
 *
 * This function destroys UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the
 * device is busy (and @anyway is zero) and %-EINVAL if it does not exist.
 *
 * NOTE(review): like ubi_attach_mtd_dev(), callers appear to serialize
 * invocations with @ubi_devices_mutex (see ubi_init()/ubi_exit()).
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (!ubi) {
		spin_unlock(&ubi_devices_lock);
		return -EINVAL;
	}

	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug or with @anyway */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	/* Remove from the array under the lock so new lookups fail */
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/*
	 * Before freeing anything, the background thread has to be stopped,
	 * otherwise it could still touch the data structures freed below.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	uif_close(ubi);
	ubi_eba_close(ubi);
	ubi_wl_close(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG
	vfree(ubi->dbg_peb_buf);
#endif
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	kfree(ubi);
	return 0;
}
935
936
937
938
939
940
941
942
943
944
945static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
946{
947 struct mtd_info *mtd;
948 int mtd_num;
949 char *endp;
950
951 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
952 if (*endp != '\0' || mtd_dev == endp) {
953
954
955
956
957 mtd = get_mtd_device_nm(mtd_dev);
958 } else
959 mtd = get_mtd_device(NULL, mtd_num);
960
961 return mtd;
962}
963
964int __init ubi_init(void)
965{
966 int err, i, k;
967
968
969 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
970 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
971
972 if (mtd_devs > UBI_MAX_DEVICES) {
973 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
974 return -EINVAL;
975 }
976
977
978 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
979 if (IS_ERR(ubi_class)) {
980 err = PTR_ERR(ubi_class);
981 ubi_err("cannot create UBI class");
982 goto out;
983 }
984
985 err = class_create_file(ubi_class, &ubi_version);
986 if (err) {
987 ubi_err("cannot create sysfs file");
988 goto out_class;
989 }
990
991 err = misc_register(&ubi_ctrl_cdev);
992 if (err) {
993 ubi_err("cannot register device");
994 goto out_version;
995 }
996
997#ifdef UBI_LINUX
998 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
999 sizeof(struct ubi_wl_entry),
1000 0, 0, NULL);
1001 if (!ubi_wl_entry_slab)
1002 goto out_dev_unreg;
1003#endif
1004
1005
1006 for (i = 0; i < mtd_devs; i++) {
1007 struct mtd_dev_param *p = &mtd_dev_param[i];
1008 struct mtd_info *mtd;
1009
1010 cond_resched();
1011
1012 mtd = open_mtd_device(p->name);
1013 if (IS_ERR(mtd)) {
1014 err = PTR_ERR(mtd);
1015 goto out_detach;
1016 }
1017
1018 mutex_lock(&ubi_devices_mutex);
1019 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
1020 p->vid_hdr_offs);
1021 mutex_unlock(&ubi_devices_mutex);
1022 if (err < 0) {
1023 put_mtd_device(mtd);
1024 ubi_err("cannot attach mtd%d", mtd->index);
1025 goto out_detach;
1026 }
1027 }
1028
1029 return 0;
1030
1031out_detach:
1032 for (k = 0; k < i; k++)
1033 if (ubi_devices[k]) {
1034 mutex_lock(&ubi_devices_mutex);
1035 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1036 mutex_unlock(&ubi_devices_mutex);
1037 }
1038#ifdef UBI_LINUX
1039 kmem_cache_destroy(ubi_wl_entry_slab);
1040out_dev_unreg:
1041#endif
1042 misc_deregister(&ubi_ctrl_cdev);
1043out_version:
1044 class_remove_file(ubi_class, &ubi_version);
1045out_class:
1046 class_destroy(ubi_class);
1047out:
1048 mtd_devs = 0;
1049 ubi_err("UBI error: cannot initialize UBI, error %d", err);
1050 return err;
1051}
1052module_init(ubi_init);
1053
1054void __exit ubi_exit(void)
1055{
1056 int i;
1057
1058 for (i = 0; i < UBI_MAX_DEVICES; i++)
1059 if (ubi_devices[i]) {
1060 mutex_lock(&ubi_devices_mutex);
1061 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1062 mutex_unlock(&ubi_devices_mutex);
1063 }
1064 kmem_cache_destroy(ubi_wl_entry_slab);
1065 misc_deregister(&ubi_ctrl_cdev);
1066 class_remove_file(ubi_class, &ubi_version);
1067 class_destroy(ubi_class);
1068 mtd_devs = 0;
1069}
1070module_exit(ubi_exit);
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080static int __init bytes_str_to_int(const char *str)
1081{
1082 char *endp;
1083 unsigned long result;
1084
1085 result = simple_strtoul(str, &endp, 0);
1086 if (str == endp || result < 0) {
1087 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1088 str);
1089 return -EINVAL;
1090 }
1091
1092 switch (*endp) {
1093 case 'G':
1094 result *= 1024;
1095 case 'M':
1096 result *= 1024;
1097 case 'K':
1098 result *= 1024;
1099 if (endp[1] == 'i' && endp[2] == 'B')
1100 endp += 2;
1101 case '\0':
1102 break;
1103 default:
1104 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1105 str);
1106 return -EINVAL;
1107 }
1108
1109 return result;
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse ("<name|num>[,<vid_hdr_offs>]")
 * @kp: not used
 *
 * Stores the parsed specification in the @mtd_dev_param array for
 * ubi_init() to consume. Returns zero in case of success and a negative
 * error code in case of error.
 */
int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[2] = {NULL, NULL};

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
		       "ignored\n");
		return 0;
	}

	/* Copy to a mutable buffer - strsep() modifies its input */
	strcpy(buf, val);

	/* Get rid of a possible final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	/* Split into at most two comma-separated tokens: name, offset */
	for (i = 0; i < 2; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Anything left means more than two comma-separated fields */
	if (pbuf) {
		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
		       val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	/* The VID header offset is optional; 0 (zero-init) means "default" */
	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	/* bytes_str_to_int() reports failure as a negative error code */
	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	mtd_devs += 1;
	return 0;
}
1177
/* 'mtd=' parameter: parse-only (no "get" method), not visible in sysfs */
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
		      "mtd=<name|num>[,<vid_hdr_offs>].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number or name.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
		      "header position and data starting position to be used "
		      "by UBI.\n"
		      "Example: mtd=content,1984 mtd=4 - attach MTD device"
		      "with name \"content\" using VID header offset 1984, and "
		      "MTD device number 4 with default VID header offset.");

/* Standard module metadata */
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
1194