1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85#include <linux/err.h>
86#include <linux/slab.h>
87#include <linux/crc32.h>
88#include <linux/math64.h>
89#include <linux/random.h>
90#include "ubi.h"
91
/* Forward declaration: full consistency check of the attach information,
 * run at the end of scanning (active only when general debug checks are on). */
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* Temporary EC and VID header buffers shared by the scanning routines.
 * NOTE(review): these are file-scope singletons, so at most one attach
 * operation can be in flight at a time — confirm callers serialize attach. */
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
120 int lnum, int ec, int to_head, struct list_head *list)
121{
122 struct ubi_ainf_peb *aeb;
123
124 if (list == &ai->free) {
125 dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
126 } else if (list == &ai->erase) {
127 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
128 } else if (list == &ai->alien) {
129 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
130 ai->alien_peb_count += 1;
131 } else
132 BUG();
133
134 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
135 if (!aeb)
136 return -ENOMEM;
137
138 aeb->pnum = pnum;
139 aeb->vol_id = vol_id;
140 aeb->lnum = lnum;
141 aeb->ec = ec;
142 if (to_head)
143 list_add(&aeb->u.list, list);
144 else
145 list_add_tail(&aeb->u.list, list);
146 return 0;
147}
148
149
150
151
152
153
154
155
156
157
158
159
160static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
161{
162 struct ubi_ainf_peb *aeb;
163
164 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
165
166 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
167 if (!aeb)
168 return -ENOMEM;
169
170 ai->corr_peb_count += 1;
171 aeb->pnum = pnum;
172 aeb->ec = ec;
173 list_add(&aeb->u.list, &ai->corr);
174 return 0;
175}
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192static int validate_vid_hdr(const struct ubi_device *ubi,
193 const struct ubi_vid_hdr *vid_hdr,
194 const struct ubi_ainf_volume *av, int pnum)
195{
196 int vol_type = vid_hdr->vol_type;
197 int vol_id = be32_to_cpu(vid_hdr->vol_id);
198 int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
199 int data_pad = be32_to_cpu(vid_hdr->data_pad);
200
201 if (av->leb_count != 0) {
202 int av_vol_type;
203
204
205
206
207
208
209
210 if (vol_id != av->vol_id) {
211 ubi_err(ubi, "inconsistent vol_id");
212 goto bad;
213 }
214
215 if (av->vol_type == UBI_STATIC_VOLUME)
216 av_vol_type = UBI_VID_STATIC;
217 else
218 av_vol_type = UBI_VID_DYNAMIC;
219
220 if (vol_type != av_vol_type) {
221 ubi_err(ubi, "inconsistent vol_type");
222 goto bad;
223 }
224
225 if (used_ebs != av->used_ebs) {
226 ubi_err(ubi, "inconsistent used_ebs");
227 goto bad;
228 }
229
230 if (data_pad != av->data_pad) {
231 ubi_err(ubi, "inconsistent data_pad");
232 goto bad;
233 }
234 }
235
236 return 0;
237
238bad:
239 ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
240 ubi_dump_vid_hdr(vid_hdr);
241 ubi_dump_av(av);
242 return -EINVAL;
243}
244
245
246
247
248
249
250
251
252
253
254
255
256
257
/**
 * add_volume - find or create attach information about a volume.
 * @ai: attaching information
 * @vol_id: ID of the volume to add
 * @pnum: physical eraseblock number (unused here except for the caller's
 *        context; kept for symmetry with the other helpers)
 * @vid_hdr: the VID header the volume information is taken from
 *
 * Looks @vol_id up in the @ai->volumes RB-tree and returns the existing
 * &struct ubi_ainf_volume if present; otherwise allocates a new one,
 * initializes it from @vid_hdr, inserts it into the tree and returns it.
 * Returns an ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
					  int vol_id, int pnum,
					  const struct ubi_vid_hdr *vid_hdr)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));

	/* Walk the volume RB-tree looking for an already known volume.
	 * Note: the tree is kept in *descending* vol_id order (greater IDs
	 * go left) - ubi_find_av() descends the same way. */
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* Not found - create a new volume object from the VID header */
	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		return ERR_PTR(-ENOMEM);

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->root = RB_ROOT;
	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
	av->compat = vid_hdr->compat;
	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
							    : UBI_STATIC_VOLUME;
	if (vol_id > ai->highest_vol_id)
		ai->highest_vol_id = vol_id;

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);
	ai->vols_found += 1;
	dbg_bld("added volume %d", vol_id);
	return av;
}
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
/**
 * ubi_compare_lebs - decide which of two PEBs holding the same LEB is newer.
 * @ubi: UBI device description object
 * @aeb: attach information about the first (already known) physical eraseblock
 * @pnum: physical eraseblock number of the second copy
 * @vid_hdr: the VID header of the second copy
 *
 * Two PEBs may hold the same LEB if an "atomic LEB change" was interrupted.
 * This function decides which copy is valid, reading and CRC-checking the
 * data if a copy was made with the copy_flag set.
 *
 * Returns a negative error code on failure, otherwise a bit mask:
 *   bit 0 - set if the second PEB (@pnum) is newer/valid;
 *   bit 1 - set if bit-flips were detected (the winner needs scrubbing);
 *   bit 2 - set if the *older* copy is corrupted (CRC mismatch).
 */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
			int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == aeb->sqnum) {
		/*
		 * Equal sequence numbers for two copies of one LEB can only
		 * come from very old (pre-sqnum-fix) UBI images; refuse them.
		 */
		ubi_err(ubi, "unsupported on-flash UBI format");
		return -EINVAL;
	}

	/* The copy with the larger sequence number was written later */
	second_is_newer = (sqnum2 > aeb->sqnum);

	/*
	 * If the newer copy has copy_flag unset it was written directly (not
	 * as part of an interrupted copy), so it is trusted without reading
	 * the data. Otherwise the newer copy's data CRC must be verified.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!aeb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		/* The first PEB is the candidate copy - re-read its header */
		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = aeb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
					pnum, err);
				if (err > 0)
					err = -EIO;

				goto out_free_vidh;
			}
		}

		/* From here on, examine the candidate copy's header */
		vid_hdr = vh;
	}

	/* Read the data of the candidate copy and verify its CRC */
	len = be32_to_cpu(vid_hdr->data_size);

	mutex_lock(&ubi->buf_mutex);
	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_unlock;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		/* The copy is broken - the other PEB wins after all */
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips |= !!err;
	}
	mutex_unlock(&ubi->buf_mutex);

	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter of @pnum
 * @vid_hdr: the (already validated) VID header read from @pnum
 * @bitflips: non-zero if bit-flips were seen while reading @pnum
 *
 * Inserts @pnum into the per-volume RB-tree of LEBs. If a PEB for the same
 * LEB is already recorded, ubi_compare_lebs() decides which copy wins; the
 * loser is queued for erasure. Also updates @ai->max_sqnum and the volume's
 * highest_lnum/last_data_size bookkeeping.
 *
 * Returns zero on success and a negative error code on failure.
 */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	/* Find (or create) the volume this LEB belongs to */
	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the per-volume RB-tree (keyed by lnum) to find where to
	 * insert this LEB, or an already-recorded copy of it.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * Two PEBs claim the same LEB - typically the result of an
		 * interrupted "atomic LEB change"; one of them must win.
		 */
		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Equal non-zero sequence numbers should be impossible on a
		 * sane image - treat it as fatal corruption.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err(ubi, "two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * cmp_res bits: bit 0 - this copy is newer; bit 1 - needs
		 * scrubbing; bit 2 - the losing copy is corrupted (goes to
		 * the head of the erase list).
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This (new) copy is newer - validate its header,
			 * queue the old PEB for erasure and take its place
			 * in the tree.
			 */
			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
			if (err)
				return err;

			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * The recorded copy is newer - this PEB is stale,
			 * schedule it for erasure.
			 */
			return add_to_list(ai, pnum, vol_id, lnum, ec,
					   cmp_res & 4, &ai->erase);
		}
	}

	/*
	 * No copy of this LEB recorded yet - validate the header and insert
	 * a fresh node into the tree.
	 */
	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
596
597
598
599
600
601
602
603
604
605struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
606 int vol_id)
607{
608 struct ubi_ainf_volume *av;
609 struct rb_node *p = ai->volumes.rb_node;
610
611 while (p) {
612 av = rb_entry(p, struct ubi_ainf_volume, rb);
613
614 if (vol_id == av->vol_id)
615 return av;
616
617 if (vol_id > av->vol_id)
618 p = p->rb_left;
619 else
620 p = p->rb_right;
621 }
622
623 return NULL;
624}
625
626
627
628
629
630
631void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
632{
633 struct rb_node *rb;
634 struct ubi_ainf_peb *aeb;
635
636 dbg_bld("remove attaching information about volume %d", av->vol_id);
637
638 while ((rb = rb_first(&av->root))) {
639 aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
640 rb_erase(&aeb->u.rb, &av->root);
641 list_add_tail(&aeb->u.list, &ai->erase);
642 }
643
644 rb_erase(&av->rb, &ai->volumes);
645 kfree(av);
646 ai->vols_found -= 1;
647}
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662static int early_erase_peb(struct ubi_device *ubi,
663 const struct ubi_attach_info *ai, int pnum, int ec)
664{
665 int err;
666 struct ubi_ec_hdr *ec_hdr;
667
668 if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
669
670
671
672
673 ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
674 pnum, ec);
675 return -EINVAL;
676 }
677
678 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
679 if (!ec_hdr)
680 return -ENOMEM;
681
682 ec_hdr->ec = cpu_to_be64(ec);
683
684 err = ubi_io_sync_erase(ubi, pnum, 0);
685 if (err < 0)
686 goto out_free;
687
688 err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
689
690out_free:
691 kfree(ec_hdr);
692 return err;
693}
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
/**
 * ubi_early_get_peb - get a free physical eraseblock during early attach.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * Returns a free PEB from the @ai->free list if one is available; otherwise
 * erases a PEB from the @ai->erase list (incrementing its erase counter) and
 * returns that. The returned element is unlinked from its list - the caller
 * owns it. Returns an ERR_PTR(-ENOSPC) if no eraseblock could be produced.
 */
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
				       struct ubi_attach_info *ai)
{
	int err = 0;
	struct ubi_ainf_peb *aeb, *tmp_aeb;

	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	/*
	 * No free PEBs - erase one from the erase list. A failed erase is
	 * skipped (the PEB stays queued for the wear-leveling subsystem to
	 * deal with later) and the next candidate is tried.
	 */
	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		/* Unknown erase counters get the mean value */
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
		if (err)
			continue;

		aeb->ec += 1;
		list_del(&aeb->u.list);
		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	ubi_err(ubi, "no free eraseblocks");
	return ERR_PTR(-ENOSPC);
}
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
/**
 * check_corruption - classify a PEB whose VID header is corrupted.
 * @ubi: UBI device description object
 * @vid_hdr: the (corrupted) VID header that was read from @pnum
 * @pnum: the physical eraseblock number to inspect
 *
 * Reads the data area of @pnum and checks whether it is all 0xFF. An
 * all-0xFF data area means the corruption is most likely the result of an
 * interrupted erase/write, so the PEB can be safely erased and reused; data
 * that is not all 0xFF may be non-UBI content and is preserved.
 *
 * Returns %0 if only the VID header is corrupted (safe to erase), %1 if
 * real data corruption was found (preserve the PEB), and a negative error
 * code on read failure.
 */
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
			    int pnum)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf, 0x00, ubi->leb_size);

	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
			  ubi->leb_size);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or ECC errors in the data area imply the PEB was
		 * being written when power was cut - treat as "only the VID
		 * header is corrupted", i.e. safe to erase.
		 */
		err = 0;
		goto out_unlock;
	}

	if (err)
		goto out_unlock;

	/* All-0xFF data means nothing was ever written - safe to erase */
	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
		goto out_unlock;

	ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
		pnum);
	ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
	ubi_dump_vid_hdr(vid_hdr);
	pr_err("hexdump of PEB %d offset %d, length %d",
	       pnum, ubi->leb_start, ubi->leb_size);
	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       ubi->peb_buf, ubi->leb_size, 1);
	err = 1;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}
805
806
807
808
809
810
811
812
813
814
815
816
817
818
/**
 * scan_peb - scan and process one physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number to scan
 * @vid: if non-NULL, receives the volume ID found in the VID header
 * @sqnum: if non-NULL, receives the sequence number from the VID header
 *
 * Reads the EC and VID headers of @pnum (into the file-scope @ech/@vidh
 * buffers) and classifies the PEB: bad, empty, corrupted, alien (internal
 * volume with unknown compat), or used - used PEBs are fed to
 * ubi_add_to_av(). Returns zero on success and a negative error code on
 * failure.
 */
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int pnum, int *vid, unsigned long long *sqnum)
{
	long long uninitialized_var(ec);
	int err, bitflips = 0, vol_id = -1, ec_err = 0;

	dbg_bld("scan PEB %d", pnum);

	/* Skip bad physical eraseblocks */
	err = ubi_io_is_bad(ubi, pnum);
	if (err < 0)
		return err;
	else if (err) {
		ai->bad_peb_count += 1;
		return 0;
	}

	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_FF:
		/* Completely empty PEB - schedule for erasure (tail) */
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 0, &ai->erase);
	case UBI_IO_FF_BITFLIPS:
		/* Empty but with bit-flips - erase soon (head of list) */
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 1, &ai->erase);
	case UBI_IO_BAD_HDR_EBADMSG:
	case UBI_IO_BAD_HDR:
		/*
		 * The EC header is unreadable - remember this and carry on;
		 * the VID header may still tell us what this PEB holds.
		 */
		ec_err = err;
		ec = UBI_UNKNOWN;
		bitflips = 1;
		break;
	default:
		ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	if (!ec_err) {
		int image_seq;

		/* Make sure UBI version is OK */
		if (ech->version != UBI_VERSION) {
			ubi_err(ubi, "this UBI version is %d, image version is %d",
				UBI_VERSION, (int)ech->version);
			return -EINVAL;
		}

		ec = be64_to_cpu(ech->ec);
		if (ec > UBI_MAX_ERASECOUNTER) {
			/*
			 * Erase counters are 64-bit on flash but 32-bit in
			 * memory; values above the in-memory limit indicate a
			 * corrupted or hostile image.
			 */
			ubi_err(ubi, "erase counter overflow, max is %d",
				UBI_MAX_ERASECOUNTER);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}

		/*
		 * All PEBs of one UBI image carry the same image sequence
		 * number; a mismatch means parts of different images were
		 * mixed (e.g. an interrupted re-flash). Zero is accepted for
		 * compatibility with images written before image_seq existed.
		 */
		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		if (image_seq && ubi->image_seq != image_seq) {
			ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
				image_seq, pnum, ubi->image_seq);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}
	}

	/* OK, we've done with the EC header, let's look at the VID header */
	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_BAD_HDR_EBADMSG:
		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
			/*
			 * Both headers are unreadable with ECC errors - this
			 * PEB may be turning bad; count it separately so
			 * late_analysis() can decide what to do.
			 */
			ai->maybe_bad_peb_count += 1;
		/* fall through */
	case UBI_IO_BAD_HDR:
		if (ec_err)
			/*
			 * Both the EC and VID headers are corrupted - almost
			 * certainly an interrupted erase; safe to re-erase
			 * without further inspection.
			 */
			err = 0;
		else
			/*
			 * EC header is fine but VID header is not - check
			 * the data area to tell "interrupted write" from
			 * real (possibly non-UBI) data corruption.
			 */
			err = check_corruption(ubi, vidh, pnum);

		if (err < 0)
			return err;
		else if (!err)
			/* This corruption is caused by a power cut */
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			/* This is an unexpected corruption */
			err = add_corrupted(ai, pnum, ec);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF_BITFLIPS:
		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				  ec, 1, &ai->erase);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF:
		if (ec_err || bitflips)
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 0, &ai->free);
		if (err)
			return err;
		goto adjust_mean_ec;
	default:
		ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	vol_id = be32_to_cpu(vidh->vol_id);
	if (vid)
		*vid = vol_id;
	if (sqnum)
		*sqnum = be64_to_cpu(vidh->sqnum);
	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
		int lnum = be32_to_cpu(vidh->lnum);

		/* Unsupported internal volume - act on its compat flags */
		switch (vidh->compat) {
		case UBI_COMPAT_DELETE:
			if (vol_id != UBI_FM_SB_VOLUME_ID
			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
				ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
					vol_id, lnum);
			}
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 1, &ai->erase);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_RO:
			ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
				vol_id, lnum);
			ubi->ro_mode = 1;
			break;

		case UBI_COMPAT_PRESERVE:
			ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
				vol_id, lnum);
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 0, &ai->alien);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_REJECT:
			ubi_err(ubi, "incompatible internal volume %d:%d found",
				vol_id, lnum);
			return -EINVAL;
		}
	}

	if (ec_err)
		ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
			 pnum);
	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
	if (err)
		return err;

adjust_mean_ec:
	/* Only PEBs with a readable EC header contribute to the statistics */
	if (!ec_err) {
		ai->ec_sum += ec;
		ai->ec_count += 1;
		if (ec > ai->max_ec)
			ai->max_ec = ec;
		if (ec < ai->min_ec)
			ai->min_ec = ec;
	}

	return 0;
}
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
/**
 * late_analysis - analyze the overall situation after a full scan.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * Decides, based on counters accumulated during scanning, whether the MTD
 * device looks like a sane UBI image, a freshly erased (empty) device, or
 * something else (e.g. non-UBI data) that must be refused. Returns zero if
 * attaching may proceed and %-EINVAL otherwise.
 */
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	int max_corr, peb_count;

	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
	/* Tolerate up to 5% corrupted PEBs, but at least 8 */
	max_corr = peb_count / 20 ?: 8;

	/*
	 * A few corrupted PEBs is normal (power cuts); too many suggests
	 * the device holds non-UBI data and should not be touched.
	 */
	if (ai->corr_peb_count) {
		ubi_err(ubi, "%d PEBs are corrupted and preserved",
			ai->corr_peb_count);
		pr_err("Corrupted PEBs are:");
		list_for_each_entry(aeb, &ai->corr, u.list)
			pr_cont(" %d", aeb->pnum);
		pr_cont("\n");

		/* Refuse rather than risk destroying foreign data */
		if (ai->corr_peb_count >= max_corr) {
			ubi_err(ubi, "too many corrupted PEBs, refusing");
			return -EINVAL;
		}
	}

	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
		/*
		 * Every non-bad, non-alien PEB is either empty or has both
		 * headers unreadable. If only a couple of PEBs are in the
		 * "maybe bad" state, assume this is a freshly erased device
		 * (the few odd PEBs may just be turning bad) and format it;
		 * otherwise assume the flash holds non-UBI data and refuse.
		 */
		if (ai->maybe_bad_peb_count <= 2) {
			ai->is_empty = 1;
			ubi_msg(ubi, "empty MTD device detected");
			/* Fresh image sequence number for the new image */
			get_random_bytes(&ubi->image_seq,
					 sizeof(ubi->image_seq));
		} else {
			ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
			return -EINVAL;
		}

	}

	return 0;
}
1129
1130
1131
1132
1133
1134
1135
1136
/**
 * destroy_av - free volume attaching information.
 * @ai: attaching information
 * @av: the volume attaching information to free
 *
 * Frees every &struct ubi_ainf_peb in @av's LEB RB-tree and then @av
 * itself. The tree is torn down leaf-first without rebalancing: instead of
 * rb_erase(), the parent's child pointer is nulled by hand, which is
 * cheaper since the whole tree is being discarded anyway.
 */
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	while (this) {
		/* Descend to a leaf, free it, detach it from its parent */
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}
1162
1163
1164
1165
1166
/**
 * destroy_ai - destroy attaching information.
 * @ai: attaching information to free
 *
 * Frees every PEB element on the alien/erase/corr/free lists, every volume
 * in the RB-tree (via destroy_av()), the slab cache the elements came from,
 * and finally @ai itself.
 */
static void destroy_ai(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb, *aeb_tmp;
	struct ubi_ainf_volume *av;
	struct rb_node *rb;

	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}

	/* Destroy the volume RB-tree leaf-first, without rebalancing
	 * (same manual child-pointer-nulling idiom as destroy_av()) */
	rb = ai->volumes.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			av = rb_entry(rb, struct ubi_ainf_volume, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &av->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			destroy_av(ai, av);
		}
	}

	/* The cache may be NULL if alloc_ai() failed part-way */
	if (ai->aeb_slab_cache)
		kmem_cache_destroy(ai->aeb_slab_cache);

	kfree(ai);
}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/**
 * scan_all - scan the entire MTD device.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @start: first physical eraseblock number to scan
 *
 * Allocates the shared @ech/@vidh scan buffers, runs scan_peb() over PEBs
 * [@start, @ubi->peb_count), computes the mean erase counter, runs
 * late_analysis(), substitutes unknown erase counters with the mean, and
 * finally self-checks the result. Returns zero on success and a negative
 * error code on failure.
 */
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int start)
{
	int err, pnum;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return err;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = start; pnum < ubi->peb_count; pnum++) {
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, NULL, NULL);
		if (err < 0)
			goto out_vidh;
	}

	ubi_msg(ubi, "scanning is finished");

	/* Calculate mean erase counter */
	if (ai->ec_count)
		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);

	err = late_analysis(ubi, ai);
	if (err)
		goto out_vidh;

	/*
	 * PEBs whose EC header could not be read were recorded with
	 * UBI_UNKNOWN erase counters - substitute the mean value everywhere.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			if (aeb->ec == UBI_UNKNOWN)
				aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->corr, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	list_for_each_entry(aeb, &ai->erase, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	err = self_check_ai(ubi, ai);
	if (err)
		goto out_vidh;

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	return 0;

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
	return err;
}
1303
1304static struct ubi_attach_info *alloc_ai(void)
1305{
1306 struct ubi_attach_info *ai;
1307
1308 ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
1309 if (!ai)
1310 return ai;
1311
1312 INIT_LIST_HEAD(&ai->corr);
1313 INIT_LIST_HEAD(&ai->free);
1314 INIT_LIST_HEAD(&ai->erase);
1315 INIT_LIST_HEAD(&ai->alien);
1316 ai->volumes = RB_ROOT;
1317 ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
1318 sizeof(struct ubi_ainf_peb),
1319 0, 0, NULL);
1320 if (!ai->aeb_slab_cache) {
1321 kfree(ai);
1322 ai = NULL;
1323 }
1324
1325 return ai;
1326}
1327
1328#ifdef CONFIG_MTD_UBI_FASTMAP
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info pointer; may be replaced with a freshly allocated one
 *
 * Scans the first %UBI_FM_MAX_START PEBs looking for the fastmap
 * super-block (the copy with the highest sequence number wins). If found,
 * the partially filled *@ai is discarded and replaced with a fresh one,
 * which ubi_scan_fastmap() then populates from the fastmap.
 *
 * Returns zero on success, %UBI_NO_FASTMAP if no fastmap anchor exists
 * (caller should fall back to a full scan), or a negative error code.
 */
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
	int err, pnum, fm_anchor = -1;
	unsigned long long max_sqnum = 0;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		goto out;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		int vol_id = -1;
		unsigned long long sqnum = -1;
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
		if (err < 0)
			goto out_vidh;

		/* Newest fastmap super-block found so far wins */
		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
			max_sqnum = sqnum;
			fm_anchor = pnum;
		}
	}

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Start over with clean attach info - the fastmap supplies it all */
	destroy_ai(*ai);
	*ai = alloc_ai();
	if (!*ai)
		return -ENOMEM;

	return ubi_scan_fastmap(ubi, *ai, fm_anchor);

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
out:
	return err;
}
1391
1392#endif
1393
1394
1395
1396
1397
1398
1399
1400
1401
/**
 * ubi_attach - attach an MTD device.
 * @ubi: UBI device description object
 * @force_scan: if non-zero, ignore any fastmap and do a full scan
 *
 * Builds the attach information (via fastmap when available and allowed,
 * otherwise by scanning), copies the summary counters into @ubi, then
 * initializes the volume table, wear-leveling and EBA subsystems from it.
 * When fastmap debug checking is enabled, the fastmap result is verified
 * against a full scan. Returns zero on success and a negative error code
 * on failure.
 */
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
	int err;
	struct ubi_attach_info *ai;

	ai = alloc_ai();
	if (!ai)
		return -ENOMEM;

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* On small devices the fastmap overhead is not worth it */
	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
		ubi->fm_disabled = 1;
		force_scan = 1;
	}

	if (force_scan)
		err = scan_all(ubi, ai, 0);
	else {
		err = scan_fast(ubi, &ai);
		if (err > 0 || mtd_is_eccerr(err)) {
			if (err != UBI_NO_FASTMAP) {
				/* Fastmap was found but is unusable - start
				 * from scratch with a clean attach info */
				destroy_ai(ai);
				ai = alloc_ai();
				if (!ai)
					return -ENOMEM;

				err = scan_all(ubi, ai, 0);
			} else {
				/* No fastmap - the first UBI_FM_MAX_START
				 * PEBs were already scanned by scan_fast() */
				err = scan_all(ubi, ai, UBI_FM_MAX_START);
			}
		}
	}
#else
	err = scan_all(ubi, ai, 0);
#endif
	if (err)
		goto out_ai;

	ubi->bad_peb_count = ai->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = ai->corr_peb_count;
	ubi->max_ec = ai->max_ec;
	ubi->mean_ec = ai->mean_ec;
	dbg_gen("max. sequence number: %llu", ai->max_sqnum);

	err = ubi_read_volume_table(ubi, ai);
	if (err)
		goto out_ai;

	err = ubi_wl_init(ubi, ai);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init(ubi, ai);
	if (err)
		goto out_wl;

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Debug check: verify the fastmap against a full device scan */
	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
		struct ubi_attach_info *scan_ai;

		scan_ai = alloc_ai();
		if (!scan_ai) {
			err = -ENOMEM;
			goto out_wl;
		}

		err = scan_all(ubi, scan_ai, 0);
		if (err) {
			destroy_ai(scan_ai);
			goto out_wl;
		}

		err = self_check_eba(ubi, ai, scan_ai);
		destroy_ai(scan_ai);

		if (err)
			goto out_wl;
	}
#endif

	destroy_ai(ai);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_ai:
	destroy_ai(ai);
	return err;
}
1496
1497
1498
1499
1500
1501
1502
1503
1504
/**
 * self_check_ai - check that the attaching information is self-consistent.
 * @ubi: UBI device description object
 * @ai: attaching information to check
 *
 * Debug-only consistency check (a no-op unless general debug checks are
 * enabled): validates every volume and LEB record, re-reads and compares
 * VID headers against the recorded values, and verifies that every good
 * PEB is referenced exactly once somewhere in @ai. Returns zero if
 * everything is fine and %-EINVAL (after dumping state) otherwise.
 */
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *last_aeb;
	uint8_t *buf;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	/*
	 * Pass 1: walk all volumes and their LEB trees checking the
	 * recorded values for internal consistency.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		/* Volumes present on an "empty" device is a contradiction */
		if (ai->is_empty) {
			ubi_err(ubi, "bad is_empty flag");
			goto bad_av;
		}

		if (av->vol_id < 0 || av->highest_lnum < 0 ||
		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
		    av->data_pad < 0 || av->last_data_size < 0) {
			ubi_err(ubi, "negative values");
			goto bad_av;
		}

		if (av->vol_id >= UBI_MAX_VOLUMES &&
		    av->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err(ubi, "bad vol_id");
			goto bad_av;
		}

		if (av->vol_id > ai->highest_vol_id) {
			ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
				ai->highest_vol_id, av->vol_id);
			goto out;
		}

		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
		    av->vol_type != UBI_STATIC_VOLUME) {
			ubi_err(ubi, "bad vol_type");
			goto bad_av;
		}

		if (av->data_pad > ubi->leb_size / 2) {
			ubi_err(ubi, "bad data_pad");
			goto bad_av;
		}

		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			last_aeb = aeb;
			leb_count += 1;

			if (aeb->pnum < 0 || aeb->ec < 0) {
				ubi_err(ubi, "negative values");
				goto bad_aeb;
			}

			if (aeb->ec < ai->min_ec) {
				ubi_err(ubi, "bad ai->min_ec (%d), %d found",
					ai->min_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->ec > ai->max_ec) {
				ubi_err(ubi, "bad ai->max_ec (%d), %d found",
					ai->max_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->pnum >= ubi->peb_count) {
				ubi_err(ubi, "too high PEB number %d, total PEBs %d",
					aeb->pnum, ubi->peb_count);
				goto bad_aeb;
			}

			/* Static volumes fix used_ebs; dynamic leave it 0 */
			if (av->vol_type == UBI_STATIC_VOLUME) {
				if (aeb->lnum >= av->used_ebs) {
					ubi_err(ubi, "bad lnum or used_ebs");
					goto bad_aeb;
				}
			} else {
				if (av->used_ebs != 0) {
					ubi_err(ubi, "non-zero used_ebs");
					goto bad_aeb;
				}
			}

			if (aeb->lnum > av->highest_lnum) {
				ubi_err(ubi, "incorrect highest_lnum or lnum");
				goto bad_aeb;
			}
		}

		if (av->leb_count != leb_count) {
			ubi_err(ubi, "bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_av;
		}

		if (!last_aeb)
			continue;

		/* The tree is lnum-ordered: the last node is the highest */
		aeb = last_aeb;

		if (aeb->lnum != av->highest_lnum) {
			ubi_err(ubi, "bad highest_lnum");
			goto bad_aeb;
		}
	}

	if (vols_found != ai->vols_found) {
		ubi_err(ubi, "bad ai->vols_found %d, should be %d",
			ai->vols_found, vols_found);
		goto out;
	}

	/* Pass 2: re-read VID headers and compare with recorded values */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			int vol_type;

			cond_resched();

			last_aeb = aeb;

			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err(ubi, "VID header is not OK (%d)",
					err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (av->vol_type != vol_type) {
				ubi_err(ubi, "bad vol_type");
				goto bad_vid_hdr;
			}

			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
				goto bad_vid_hdr;
			}

			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err(ubi, "bad vol_id %d", av->vol_id);
				goto bad_vid_hdr;
			}

			if (av->compat != vidh->compat) {
				ubi_err(ubi, "bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err(ubi, "bad lnum %d", aeb->lnum);
				goto bad_vid_hdr;
			}

			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
				goto bad_vid_hdr;
			}

			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err(ubi, "bad data_pad %d", av->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_aeb)
			continue;

		/* vidh still holds the header of the highest-lnum LEB */
		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
			goto bad_vid_hdr;
		}

		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err(ubi, "bad last_data_size %d",
				av->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Pass 3: one byte per PEB; mark every PEB referenced anywhere in
	 * @ai (or known bad) and verify none was left out.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->free, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->corr, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->erase, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->alien, u.list)
		buf[aeb->pnum] = 1;

	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err(ubi, "PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_aeb:
	ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
	ubi_dump_aeb(aeb, 0);
	ubi_dump_av(av);
	goto out;

bad_av:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	goto out;

bad_vid_hdr:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	ubi_dump_vid_hdr(vidh);

out:
	dump_stack();
	return -EINVAL;
}
1769