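/*
 * UBI attaching sub-system.
 *
 * This sub-system is responsible for attaching MTD devices: it reads the UBI
 * headers of all physical eraseblocks (PEBs), checks them, and builds the
 * attaching information (&struct ubi_attach_info) which is later handed over
 * to the volume table, EBA and wear-levelling sub-systems.
 *
 * PEBs which cannot be attached to a volume end up on one of the lists kept
 * in &struct ubi_attach_info: "free", "erase", "corr" (corrupted) or "alien".
 */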
#ifndef __UBOOT__
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/random.h>
#else
#include <div64.h>
#include <linux/err.h>
#endif

#include <linux/math64.h>

#include <ubi_uboot.h>
#include "ubi.h"

static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
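
/**
 * add_to_list - add physical eraseblock to a list.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @vol_id: the last used volume id for the PEB
 * @lnum: the last used LEB number for the PEB
 * @ec: erase counter of the physical eraseblock
 * @to_head: if not zero, add to the head of the list
 * @list: the list to add to
 *
 * This function allocates a 'struct ubi_ainf_peb' object for physical
 * eraseblock @pnum and adds it to the "free", "erase" or "alien" list.
 * If @to_head is not zero, the PEB is added to the head of the list.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */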
static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
		       int lnum, int ec, int to_head, struct list_head *list)
{
	struct ubi_ainf_peb *aeb;

	if (list == &ai->free) {
		dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->erase) {
		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->alien) {
		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
		ai->alien_peb_count += 1;
	} else
		BUG();

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->ec = ec;
	if (to_head)
		list_add(&aeb->u.list, list);
	else
		list_add_tail(&aeb->u.list, list);
	return 0;
}
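
/**
 * add_corrupted - add a corrupted physical eraseblock.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @ec: erase counter of the physical eraseblock
 *
 * This function allocates a 'struct ubi_ainf_peb' object for corrupted
 * physical eraseblock @pnum and adds it to the 'corr' list. Returns zero in
 * case of success and a negative error code in case of failure.
 */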
static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
	struct ubi_ainf_peb *aeb;

	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	ai->corr_peb_count += 1;
	aeb->pnum = pnum;
	aeb->ec = ec;
	list_add(&aeb->u.list, &ai->corr);
	return 0;
}
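
/**
 * validate_vid_hdr - check volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 * @av: information about the volume this logical eraseblock belongs to
 * @pnum: physical eraseblock number the VID header came from
 *
 * This function checks that the data stored in @vid_hdr is consistent with
 * the information about this volume already collected in @av. Returns zero
 * if the VID header is consistent and %-EINVAL if not.
 */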
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr,
			    const struct ubi_ainf_volume *av, int pnum)
{
	int vol_type = vid_hdr->vol_type;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);

	if (av->leb_count != 0) {
		int av_vol_type;

		/*
		 * This is not the first logical eraseblock belonging to this
		 * volume. Ensure that the data in its VID header is
		 * consistent with the data in the VID headers processed so
		 * far.
		 */

		if (vol_id != av->vol_id) {
			ubi_err(ubi, "inconsistent vol_id");
			goto bad;
		}

		if (av->vol_type == UBI_STATIC_VOLUME)
			av_vol_type = UBI_VID_STATIC;
		else
			av_vol_type = UBI_VID_DYNAMIC;

		if (vol_type != av_vol_type) {
			ubi_err(ubi, "inconsistent vol_type");
			goto bad;
		}

		if (used_ebs != av->used_ebs) {
			ubi_err(ubi, "inconsistent used_ebs");
			goto bad;
		}

		if (data_pad != av->data_pad) {
			ubi_err(ubi, "inconsistent data_pad");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	ubi_dump_av(av);
	return -EINVAL;
}
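
/**
 * add_volume - add volume to the attaching information.
 * @ai: attaching information
 * @vol_id: ID of the volume to add
 * @pnum: physical eraseblock number
 * @vid_hdr: volume identifier header
 *
 * If the volume corresponding to the @vid_hdr logical eraseblock is already
 * present in the attaching information, this function does nothing. Otherwise
 * it adds the corresponding volume to the attaching information. Returns a
 * pointer to the allocated "av" object in case of success and a negative
 * error code in case of failure.
 */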
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
					  int vol_id, int pnum,
					  const struct ubi_vid_hdr *vid_hdr)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));

	/* Walk the volume RB-tree to look if this volume is already present */
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* The volume is absent - add it */
	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		return ERR_PTR(-ENOMEM);

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->root = RB_ROOT;
	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
	av->compat = vid_hdr->compat;
	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
							    : UBI_STATIC_VOLUME;
	if (vol_id > ai->highest_vol_id)
		ai->highest_vol_id = vol_id;

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);
	ai->vols_found += 1;
	dbg_bld("added volume %d", vol_id);
	return av;
}
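
/**
 * ubi_compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @aeb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock to
 * compare
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and informs which one is newer. In
 * case of success this function returns a positive value, in case of failure,
 * a negative error code is returned. The success return codes use binary
 * flags, so the caller has to test the returned value bit-wise:
 *     o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
 *       second PEB (described by @pnum and @vid_hdr);
 *     o bit 0 is set: the second PEB is newer;
 *     o bit 1 is cleared: no bit-flips were detected in the newer LEB;
 *     o bit 1 is set: bit-flips were detected in the newer LEB;
 *     o bit 2 is cleared: the older LEB is not corrupted;
 *     o bit 2 is set: the older LEB is corrupted.
 */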
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
		     int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == aeb->sqnum) {
		/*
		 * This must be a really ancient UBI image which has been
		 * created before sequence numbers support has been added.
		 * Such images are not supported anymore.
		 */
		ubi_err(ubi, "unsupported on-flash UBI format");
		return -EINVAL;
	}

	/* Obviously the LEB with lower sequence counter is older */
	second_is_newer = (sqnum2 > aeb->sqnum);

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * newer version is not set, then we just return, otherwise we have to
	 * check the data CRC. For the second PEB we already have the VID
	 * header, for the first one we have to read it from flash.
	 */

	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!aeb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = aeb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
					pnum, err);
				if (err > 0)
					err = -EIO;

				goto out_free_vidh;
			}
		}

		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */

	len = be32_to_cpu(vid_hdr->data_size);

	mutex_lock(&ubi->buf_mutex);
	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_unlock;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips |= !!err;
	}
	mutex_unlock(&ubi->buf_mutex);

	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
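
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * 'used' tree of the corresponding volume. The function is rather complex
 * because it has to handle cases when this is not the first physical
 * eraseblock belonging to the same logical eraseblock, and the newer one has
 * to be picked, while the older one has to be dropped. This function returns
 * zero in case of success and a negative error code in case of failure.
 */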
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first occurrence of logical eraseblock @lnum.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */

		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Make sure that the logical eraseblocks have different
		 * sequence numbers. Otherwise the image is bad.
		 *
		 * However, if the sequence number is zero, we assume it must
		 * be an ancient UBI image from the era when UBI did not have
		 * sequence numbers, and such images are still accepted.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err(ubi, "two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
			if (err)
				return err;

			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * This logical eraseblock is older than the one found
			 * previously.
			 */
			return add_to_list(ai, pnum, vol_id, lnum, ec,
					   cmp_res & 4, &ai->erase);
		}
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */

	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
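
/**
 * ubi_find_av - find volume in the attaching information.
 * @ai: attaching information
 * @vol_id: the requested volume ID
 *
 * This function returns a pointer to the volume description or %NULL if there
 * are no data about this volume in the attaching information.
 */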
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
				    int vol_id)
{
	struct ubi_ainf_volume *av;
	struct rb_node *p = ai->volumes.rb_node;

	while (p) {
		av = rb_entry(p, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return NULL;
}
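
/**
 * ubi_remove_av - delete attaching information about a volume.
 * @ai: attaching information
 * @av: the volume attaching information to delete
 *
 * The PEBs of the deleted volume are moved to the "erase" list.
 */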
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct rb_node *rb;
	struct ubi_ainf_peb *aeb;

	dbg_bld("remove attaching information about volume %d", av->vol_id);

	while ((rb = rb_first(&av->root))) {
		aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
		rb_erase(&aeb->u.rb, &av->root);
		list_add_tail(&aeb->u.list, &ai->erase);
	}

	rb_erase(&av->rb, &ai->volumes);
	kfree(av);
	ai->vols_found -= 1;
}
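
/**
 * early_erase_peb - erase a physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: physical eraseblock number to erase
 * @ec: erase counter value to write to the erased PEB
 *
 * This function erases physical eraseblock @pnum and writes the erase counter
 * header to it. It is only meant to be used on the early attaching stages,
 * before the wear-levelling sub-system is initialized. Returns zero in case
 * of success and a negative error code in case of failure.
 */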
static int early_erase_peb(struct ubi_device *ubi,
			   const struct ubi_attach_info *ai, int pnum, int ec)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;

	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
			pnum, ec);
		return -EINVAL;
	}

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_sync_erase(ubi, pnum, 0);
	if (err < 0)
		goto out_free;

	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);

out_free:
	kfree(ec_hdr);
	return err;
}
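
/**
 * ubi_early_get_peb - get a free physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns a free physical eraseblock. It is supposed to be
 * called on the UBI initialization stages when the wear-levelling sub-system
 * is not initialized yet. It picks a physical eraseblock from one of the
 * lists, writes the EC header if needed, and removes it from the list.
 *
 * Returns a pointer to the "aeb" of the found free PEB in case of success and
 * an error pointer in case of failure.
 */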
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
				       struct ubi_attach_info *ai)
{
	int err = 0;
	struct ubi_ainf_peb *aeb, *tmp_aeb;

	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	/*
	 * We try to erase the first physical eraseblock from the erase list
	 * and pick it if we succeed, or try to erase the next one if not.
	 * And so forth. We do not care about bad eraseblocks here - they will
	 * be handled later.
	 */
	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
		if (err)
			continue;

		aeb->ec += 1;
		list_del(&aeb->u.list);
		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	ubi_err(ubi, "no free eraseblocks");
	return ERR_PTR(-ENOSPC);
}
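
/**
 * check_corruption - check the data area of a PEB.
 * @ubi: UBI device description object
 * @vid_hdr: the (corrupted) VID header of this PEB
 * @pnum: the physical eraseblock number to check
 *
 * This is a helper function which is used to distinguish between VID header
 * corruptions caused by power cuts and other reasons. If the PEB contains
 * only 0xFF bytes in the data area, the VID header is most probably corrupted
 * because of a power cut (%0 is returned in this case). Otherwise, it was
 * probably corrupted for some other reason (%1 is returned in this case). A
 * negative error code is returned in case of a read error.
 *
 * If the corruption reason was a power cut, UBI can safely erase this PEB.
 * Otherwise, it should preserve it to avoid possibly destroying important
 * information.
 */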
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
			    int pnum)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf, 0x00, ubi->leb_size);

	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
			  ubi->leb_size);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or integrity errors while reading the data area.
		 * It is difficult to say for sure what kind of corruption
		 * this is, so treat it like a power-cut corruption (return 0)
		 * and let the caller schedule this PEB for erasure.
		 */
		err = 0;
		goto out_unlock;
	}

	if (err)
		goto out_unlock;

	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
		goto out_unlock;

	ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
		pnum);
	ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
	ubi_dump_vid_hdr(vid_hdr);
	pr_err("hexdump of PEB %d offset %d, length %d",
	       pnum, ubi->leb_start, ubi->leb_size);
	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       ubi->peb_buf, ubi->leb_size, 1);
	err = 1;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}
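
/**
 * scan_peb - scan and process UBI headers of a PEB.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @vid: the volume ID found in this PEB is stored here (may be %NULL)
 * @sqnum: the sequence number found in this PEB is stored here (may be %NULL)
 *
 * This function reads the UBI headers of PEB @pnum, checks them, and adds
 * information about this PEB to the corresponding list or RB-tree in the
 * "attaching info" structure. Returns zero if the physical eraseblock was
 * successfully handled and a negative error code in case of failure.
 */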
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int pnum, int *vid, unsigned long long *sqnum)
{
	long long uninitialized_var(ec);
	int err, bitflips = 0, vol_id = -1, ec_err = 0;

	dbg_bld("scan PEB %d", pnum);

	/* Skip bad physical eraseblocks */
	err = ubi_io_is_bad(ubi, pnum);
	if (err < 0)
		return err;
	else if (err) {
		ai->bad_peb_count += 1;
		return 0;
	}

	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_FF:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 0, &ai->erase);
	case UBI_IO_FF_BITFLIPS:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 1, &ai->erase);
	case UBI_IO_BAD_HDR_EBADMSG:
	case UBI_IO_BAD_HDR:
		/*
		 * We have to also look at the VID header, possibly it is not
		 * corrupted. Set %bitflips flag in order to make this PEB be
		 * moved and EC be re-created.
		 */
		ec_err = err;
		ec = UBI_UNKNOWN;
		bitflips = 1;
		break;
	default:
		ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	if (!ec_err) {
		int image_seq;

		/* Make sure UBI version is OK */
		if (ech->version != UBI_VERSION) {
			ubi_err(ubi, "this UBI version is %d, image version is %d",
				UBI_VERSION, (int)ech->version);
			return -EINVAL;
		}

		ec = be64_to_cpu(ech->ec);
		if (ec > UBI_MAX_ERASECOUNTER) {
			/*
			 * Erase counter overflow. The EC headers have 64 bits
			 * reserved, but UBI uses only 31 bit values, as this
			 * seems to be enough for any existing flash. Upgrade
			 * UBI and use 64-bit erase counters internally.
			 */
			ubi_err(ubi, "erase counter overflow, max is %d",
				UBI_MAX_ERASECOUNTER);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}

		/*
		 * Make sure that all PEBs have the same image sequence number.
		 * This allows us to detect situations when users flash UBI
		 * images incorrectly, so that the flash has the new UBI image
		 * and leftovers from the old one. Old UBI implementations
		 * always set the sequence number to zero, so we do not
		 * complain if some PEBs have a zero sequence number while
		 * other PEBs have a non-zero one.
		 */
		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		if (image_seq && ubi->image_seq != image_seq) {
			ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
				image_seq, pnum, ubi->image_seq);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}
	}

	/* OK, we've done with the EC header, get the VID header */

	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_BAD_HDR_EBADMSG:
		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
			/*
			 * Both EC and VID headers are corrupted and were read
			 * with data integrity error, probably this is a bad
			 * PEB, but it is not marked as bad yet. This may also
			 * be a result of power cut during erasure.
			 */
			ai->maybe_bad_peb_count += 1;
		/* fall through */
	case UBI_IO_BAD_HDR:
		if (ec_err)
			/*
			 * Both headers are corrupted. There is a possibility
			 * that this is a valid UBI PEB which has a
			 * corresponding LEB, but the headers are corrupted.
			 * However, it is impossible to distinguish it from a
			 * PEB which just contains garbage because of a power
			 * cut during erase operation. So we just schedule
			 * this PEB for erasure.
			 */
			err = 0;
		else
			/*
			 * The EC was OK, but the VID header is corrupted. We
			 * have to check what is in the data area.
			 */
			err = check_corruption(ubi, vidh, pnum);

		if (err < 0)
			return err;
		else if (!err)
			/* This corruption is caused by a power cut */
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			/* This is an unexpected corruption */
			err = add_corrupted(ai, pnum, ec);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF_BITFLIPS:
		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				  ec, 1, &ai->erase);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF:
		if (ec_err || bitflips)
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 0, &ai->free);
		if (err)
			return err;
		goto adjust_mean_ec;
	default:
		ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	vol_id = be32_to_cpu(vidh->vol_id);
	if (vid)
		*vid = vol_id;
	if (sqnum)
		*sqnum = be64_to_cpu(vidh->sqnum);
	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
		int lnum = be32_to_cpu(vidh->lnum);

		/* Unsupported internal volume */
		switch (vidh->compat) {
		case UBI_COMPAT_DELETE:
			if (vol_id != UBI_FM_SB_VOLUME_ID
			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
				ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
					vol_id, lnum);
			}
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 1, &ai->erase);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_RO:
			ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
				vol_id, lnum);
			ubi->ro_mode = 1;
			break;

		case UBI_COMPAT_PRESERVE:
			ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
				vol_id, lnum);
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 0, &ai->alien);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_REJECT:
			ubi_err(ubi, "incompatible internal volume %d:%d found",
				vol_id, lnum);
			return -EINVAL;
		}
	}

	if (ec_err)
		ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
			 pnum);
	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
	if (err)
		return err;

adjust_mean_ec:
	if (!ec_err) {
		ai->ec_sum += ec;
		ai->ec_count += 1;
		if (ec > ai->max_ec)
			ai->max_ec = ec;
		if (ec < ai->min_ec)
			ai->min_ec = ec;
	}

	return 0;
}
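
/**
 * late_analysis - analyze the overall situation with the PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function which takes a look at what PEBs we have after we
 * gathered information about all of them. It decides whether the flash is
 * empty and should be formatted, or whether there are too many corrupted PEBs
 * and we should not attach this MTD device. Returns zero if we should proceed
 * with attaching the MTD device, and %-EINVAL if we should not.
 */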
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	int max_corr, peb_count;

	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
	max_corr = peb_count / 20 ?: 8;

	/*
	 * A few corrupted PEBs is not a problem and may be just a result of
	 * unclean reboots. However, many of them may indicate some problems
	 * with the flash HW or driver.
	 */
	if (ai->corr_peb_count) {
		ubi_err(ubi, "%d PEBs are corrupted and preserved",
			ai->corr_peb_count);
		pr_err("Corrupted PEBs are:");
		list_for_each_entry(aeb, &ai->corr, u.list)
			pr_cont(" %d", aeb->pnum);
		pr_cont("\n");

		/*
		 * If too many PEBs are corrupted, we refuse attaching,
		 * otherwise only the error message above is printed and the
		 * corrupted PEBs are preserved.
		 */
		if (ai->corr_peb_count >= max_corr) {
			ubi_err(ubi, "too many corrupted PEBs, refusing");
			return -EINVAL;
		}
	}

	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
		/*
		 * All PEBs are empty, or almost all - a couple of PEBs look
		 * like they may be bad PEBs which were not marked as bad yet.
		 *
		 * This piece of code basically tries to distinguish between
		 * the following situations:
		 *
		 * 1. Flash is empty, but there are a few bad PEBs, which are
		 *    not marked as bad so far, and which were read with error.
		 *    We want to go ahead and format this flash.
		 *
		 * 2. Flash contains non-UBI data and we do not want to format
		 *    it and destroy possibly important information.
		 */
		if (ai->maybe_bad_peb_count <= 2) {
			ai->is_empty = 1;
			ubi_msg(ubi, "empty MTD device detected");
			get_random_bytes(&ubi->image_seq,
					 sizeof(ubi->image_seq));
		} else {
			ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
			return -EINVAL;
		}

	}

	return 0;
}
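
/**
 * destroy_av - free volume attaching information.
 * @ai: attaching information
 * @av: volume attaching information
 *
 * This function destroys the volume attaching information.
 */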
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}
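
/**
 * destroy_ai - destroy attaching information.
 * @ai: attaching information
 */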
static void destroy_ai(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb, *aeb_tmp;
	struct ubi_ainf_volume *av;
	struct rb_node *rb;

	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}

	/* Destroy the volume RB-tree */
	rb = ai->volumes.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			av = rb_entry(rb, struct ubi_ainf_volume, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &av->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			destroy_av(ai, av);
		}
	}

	if (ai->aeb_slab_cache)
		kmem_cache_destroy(ai->aeb_slab_cache);

	kfree(ai);
}
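
/**
 * scan_all - scan entire MTD device.
 * @ubi: UBI device description object
 * @ai: attach info object
 * @start: start scanning at this PEB
 *
 * This function does a full scan of the MTD device and fills in the attaching
 * information @ai. Returns zero in case of success and a negative error code
 * in case of failure.
 */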
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int start)
{
	int err, pnum;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return err;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = start; pnum < ubi->peb_count; pnum++) {
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, NULL, NULL);
		if (err < 0)
			goto out_vidh;
	}

	ubi_msg(ubi, "scanning is finished");

	/* Calculate mean erase counter */
	if (ai->ec_count)
		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);

	err = late_analysis(ubi, ai);
	if (err)
		goto out_vidh;

	/*
	 * In case of unknown erase counter we use the mean erase counter
	 * value.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			if (aeb->ec == UBI_UNKNOWN)
				aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->corr, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	list_for_each_entry(aeb, &ai->erase, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	err = self_check_ai(ubi, ai);
	if (err)
		goto out_vidh;

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	return 0;

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
	return err;
}

static struct ubi_attach_info *alloc_ai(void)
{
	struct ubi_attach_info *ai;

	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
	if (!ai)
		return ai;

	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		kfree(ai);
		ai = NULL;
	}

	return ai;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
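
/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info object
 *
 * This function scans the first %UBI_FM_MAX_START PEBs looking for a fastmap
 * anchor. Returns zero in case of success, %UBI_NO_FASTMAP if no fastmap
 * anchor PEB was found, another positive value (as returned by
 * ubi_scan_fastmap()) if the fastmap could not be used, and a negative error
 * code in case of failure.
 */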
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
	int err, pnum, fm_anchor = -1;
	unsigned long long max_sqnum = 0;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		goto out;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		int vol_id = -1;
		unsigned long long sqnum = -1;
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
		if (err < 0)
			goto out_vidh;

		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
			max_sqnum = sqnum;
			fm_anchor = pnum;
		}
	}

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	destroy_ai(*ai);
	*ai = alloc_ai();
	if (!*ai)
		return -ENOMEM;

	return ubi_scan_fastmap(ubi, *ai, fm_anchor);

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
out:
	return err;
}

#endif
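
/**
 * ubi_attach - attach an MTD device.
 * @ubi: UBI device descriptor
 * @force_scan: if set to non-zero attach by full scanning
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */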
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
	int err;
	struct ubi_attach_info *ai;

	ai = alloc_ai();
	if (!ai)
		return -ENOMEM;

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* On small flash devices we disable fastmap in any case. */
	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
		ubi->fm_disabled = 1;
		force_scan = 1;
	}

	if (force_scan)
		err = scan_all(ubi, ai, 0);
	else {
		err = scan_fast(ubi, &ai);
		if (err > 0 || mtd_is_eccerr(err)) {
			if (err != UBI_NO_FASTMAP) {
				destroy_ai(ai);
				ai = alloc_ai();
				if (!ai)
					return -ENOMEM;

				err = scan_all(ubi, ai, 0);
			} else {
				err = scan_all(ubi, ai, UBI_FM_MAX_START);
			}
		}
	}
#else
	err = scan_all(ubi, ai, 0);
#endif
	if (err)
		goto out_ai;

	ubi->bad_peb_count = ai->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = ai->corr_peb_count;
	ubi->max_ec = ai->max_ec;
	ubi->mean_ec = ai->mean_ec;
	dbg_gen("max. sequence number: %llu", ai->max_sqnum);

	err = ubi_read_volume_table(ubi, ai);
	if (err)
		goto out_ai;

	err = ubi_wl_init(ubi, ai);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init(ubi, ai);
	if (err)
		goto out_wl;

#ifdef CONFIG_MTD_UBI_FASTMAP
	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
		struct ubi_attach_info *scan_ai;

		scan_ai = alloc_ai();
		if (!scan_ai) {
			err = -ENOMEM;
			goto out_wl;
		}

		err = scan_all(ubi, scan_ai, 0);
		if (err) {
			destroy_ai(scan_ai);
			goto out_wl;
		}

		err = self_check_eba(ubi, ai, scan_ai);
		destroy_ai(scan_ai);

		if (err)
			goto out_wl;
	}
#endif

	destroy_ai(ai);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_ai:
	destroy_ai(ai);
	return err;
}
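
/**
 * self_check_ai - check the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero if the attaching information is all right, and a
 * negative error code if not or if an error occurred.
 */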
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *last_aeb;
	uint8_t *buf;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	/*
	 * At first, check that attaching information is OK.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		if (ai->is_empty) {
			ubi_err(ubi, "bad is_empty flag");
			goto bad_av;
		}

		if (av->vol_id < 0 || av->highest_lnum < 0 ||
		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
		    av->data_pad < 0 || av->last_data_size < 0) {
			ubi_err(ubi, "negative values");
			goto bad_av;
		}

		if (av->vol_id >= UBI_MAX_VOLUMES &&
		    av->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err(ubi, "bad vol_id");
			goto bad_av;
		}

		if (av->vol_id > ai->highest_vol_id) {
			ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
				ai->highest_vol_id, av->vol_id);
			goto out;
		}

		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
		    av->vol_type != UBI_STATIC_VOLUME) {
			ubi_err(ubi, "bad vol_type");
			goto bad_av;
		}

		if (av->data_pad > ubi->leb_size / 2) {
			ubi_err(ubi, "bad data_pad");
			goto bad_av;
		}

		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			last_aeb = aeb;
			leb_count += 1;

			if (aeb->pnum < 0 || aeb->ec < 0) {
				ubi_err(ubi, "negative values");
				goto bad_aeb;
			}

			if (aeb->ec < ai->min_ec) {
				ubi_err(ubi, "bad ai->min_ec (%d), %d found",
					ai->min_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->ec > ai->max_ec) {
				ubi_err(ubi, "bad ai->max_ec (%d), %d found",
					ai->max_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->pnum >= ubi->peb_count) {
				ubi_err(ubi, "too high PEB number %d, total PEBs %d",
					aeb->pnum, ubi->peb_count);
				goto bad_aeb;
			}

			if (av->vol_type == UBI_STATIC_VOLUME) {
				if (aeb->lnum >= av->used_ebs) {
					ubi_err(ubi, "bad lnum or used_ebs");
					goto bad_aeb;
				}
			} else {
				if (av->used_ebs != 0) {
					ubi_err(ubi, "non-zero used_ebs");
					goto bad_aeb;
				}
			}

			if (aeb->lnum > av->highest_lnum) {
				ubi_err(ubi, "incorrect highest_lnum or lnum");
				goto bad_aeb;
			}
		}

		if (av->leb_count != leb_count) {
			ubi_err(ubi, "bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_av;
		}

		if (!last_aeb)
			continue;

		aeb = last_aeb;

		if (aeb->lnum != av->highest_lnum) {
			ubi_err(ubi, "bad highest_lnum");
			goto bad_aeb;
		}
	}

	if (vols_found != ai->vols_found) {
		ubi_err(ubi, "bad ai->vols_found %d, should be %d",
			ai->vols_found, vols_found);
		goto out;
	}

	/* Check that attaching information is correct */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			int vol_type;

			cond_resched();

			last_aeb = aeb;

			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err(ubi, "VID header is not OK (%d)",
					err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (av->vol_type != vol_type) {
				ubi_err(ubi, "bad vol_type");
				goto bad_vid_hdr;
			}

			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
				goto bad_vid_hdr;
			}

			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err(ubi, "bad vol_id %d", av->vol_id);
				goto bad_vid_hdr;
			}

			if (av->compat != vidh->compat) {
				ubi_err(ubi, "bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err(ubi, "bad lnum %d", aeb->lnum);
				goto bad_vid_hdr;
			}

			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
				goto bad_vid_hdr;
			}

			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err(ubi, "bad data_pad %d", av->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_aeb)
			continue;

		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
			goto bad_vid_hdr;
		}

		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err(ubi, "bad last_data_size %d",
				av->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Make sure that all the physical eraseblocks are in one of the lists
	 * or trees.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->free, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->corr, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->erase, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->alien, u.list)
		buf[aeb->pnum] = 1;

	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err(ubi, "PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_aeb:
	ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
	ubi_dump_aeb(aeb, 0);
	ubi_dump_av(av);
	goto out;

bad_av:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	goto out;

bad_vid_hdr:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	ubi_dump_vid_hdr(vidh);

out:
	dump_stack();
	return -EINVAL;
}