/*
 * UBI attaching sub-system.
 *
 * This sub-system is responsible for attaching MTD devices: it scans all
 * physical eraseblocks (PEBs) of the device, reads and checks their erase
 * counter (EC) and volume identifier (VID) headers, and builds the attaching
 * information (struct ubi_attach_info) which the rest of UBI then uses to
 * initialize the volume table, the EBA and the wear-leveling sub-systems.
 *
 * PEBs which cannot be classified end up on the "erase", "corrupted" or
 * "alien" lists of the attaching information; see the helpers below.
 */

#ifndef __UBOOT__
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/random.h>
#else
#include <div64.h>
#include <linux/err.h>
#endif

#include <linux/math64.h>

#include <ubi_uboot.h>
#include "ubi.h"

static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
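
/**
 * add_to_list - add physical eraseblock to a list.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @vol_id: the last used volume id for the PEB
 * @lnum: the last used LEB number for the PEB
 * @ec: erase counter of the physical eraseblock
 * @to_head: if not zero, add to the head of the list
 * @list: the list to add to
 *
 * This function allocates a 'struct ubi_ainf_peb' object for PEB @pnum and
 * adds it to the "free", "erase" or "alien" list. Returns zero in case of
 * success and a negative error code in case of failure.
 */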
static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
		       int lnum, int ec, int to_head, struct list_head *list)
{
	struct ubi_ainf_peb *aeb;

	if (list == &ai->free) {
		dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->erase) {
		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->alien) {
		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
		ai->alien_peb_count += 1;
	} else
		BUG();

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->ec = ec;
	if (to_head)
		list_add(&aeb->u.list, list);
	else
		list_add_tail(&aeb->u.list, list);
	return 0;
}
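
/**
 * add_corrupted - add a corrupted physical eraseblock.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @ec: erase counter of the physical eraseblock
 *
 * This function allocates a 'struct ubi_ainf_peb' object for the corrupted
 * PEB @pnum and adds it to the "corr" list. Returns zero in case of success
 * and a negative error code in case of failure.
 */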
static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
	struct ubi_ainf_peb *aeb;

	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	ai->corr_peb_count += 1;
	aeb->pnum = pnum;
	aeb->ec = ec;
	list_add(&aeb->u.list, &ai->corr);
	return 0;
}
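
/**
 * validate_vid_hdr - check volume identifier header.
 * @vid_hdr: the volume identifier header to check
 * @av: information about the volume this logical eraseblock belongs to
 * @pnum: physical eraseblock number the VID header came from
 *
 * This function checks that the data stored in @vid_hdr is consistent with
 * the information already gathered about this volume in @av. Returns zero if
 * the VID header is consistent and %-EINVAL if not.
 */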
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
			    const struct ubi_ainf_volume *av, int pnum)
{
	int vol_type = vid_hdr->vol_type;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);

	if (av->leb_count != 0) {
		int av_vol_type;

		/*
		 * This is not the first logical eraseblock belonging to this
		 * volume. Ensure that the data in its VID header is consistent
		 * with the data in previous logical eraseblock headers.
		 */
		if (vol_id != av->vol_id) {
			ubi_err("inconsistent vol_id");
			goto bad;
		}

		if (av->vol_type == UBI_STATIC_VOLUME)
			av_vol_type = UBI_VID_STATIC;
		else
			av_vol_type = UBI_VID_DYNAMIC;

		if (vol_type != av_vol_type) {
			ubi_err("inconsistent vol_type");
			goto bad;
		}

		if (used_ebs != av->used_ebs) {
			ubi_err("inconsistent used_ebs");
			goto bad;
		}

		if (data_pad != av->data_pad) {
			ubi_err("inconsistent data_pad");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("inconsistent VID header at PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	ubi_dump_av(av);
	return -EINVAL;
}
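
/**
 * add_volume - add volume to the attaching information.
 * @ai: attaching information
 * @vol_id: ID of the volume to add
 * @pnum: physical eraseblock number
 * @vid_hdr: volume identifier header
 *
 * If the volume corresponding to the @vid_hdr logical eraseblock is already
 * present in the attaching information, this function returns a pointer to
 * it. Otherwise it allocates a new volume object, inserts it into the volume
 * RB-tree and returns a pointer to it, or an error pointer in case of
 * failure.
 */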
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
					  int vol_id, int pnum,
					  const struct ubi_vid_hdr *vid_hdr)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));

	/* Walk the volume RB-tree to look if this volume is already present */
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* The volume is absent - add it */
	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		return ERR_PTR(-ENOMEM);

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->root = RB_ROOT;
	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
	av->compat = vid_hdr->compat;
	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
							    : UBI_STATIC_VOLUME;
	if (vol_id > ai->highest_vol_id)
		ai->highest_vol_id = vol_id;

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);
	ai->vols_found += 1;
	dbg_bld("added volume %d", vol_id);
	return av;
}
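
/**
 * ubi_compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @aeb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and tells which one is newer. In
 * case of failure a negative error code is returned. In case of success the
 * return value encodes the result in its low bits:
 *     o bit 0 clear: the first PEB (described by @aeb) is newer than the
 *       second PEB (described by @pnum and @vid_hdr);
 *     o bit 0 set: the second PEB is newer;
 *     o bit 1 set: bit-flips were detected in the newer LEB;
 *     o bit 2 set: the older LEB is corrupted.
 */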
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
		     int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == aeb->sqnum) {
		/*
		 * Equal sequence numbers mean this must be a really ancient
		 * UBI image, created before sequence number support was
		 * added. Such images are not supported anymore.
		 */
		ubi_err("unsupported on-flash UBI format");
		return -EINVAL;
	}

	/* Obviously the LEB with lower sequence counter is older */
	second_is_newer = (sqnum2 > aeb->sqnum);

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * the newer version is not set, we can just return. Otherwise the
	 * data CRC has to be checked. For the second PEB we already have the
	 * VID header; for the first one it must be re-read from flash.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!aeb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = aeb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
					pnum, err);
				if (err > 0)
					err = -EIO;

				goto out_free_vidh;
			}
		}

		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */
	len = be32_to_cpu(vid_hdr->data_size);

	mutex_lock(&ubi->buf_mutex);
	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_unlock;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips = !!err;
	}
	mutex_unlock(&ubi->buf_mutex);

	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
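
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * "used" tree of the corresponding volume. It also handles the case when this
 * is not the first PEB carrying the same logical eraseblock: the newer copy
 * is kept and the older one is scheduled for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */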
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first occurrence of this logical eraseblock.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */
		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Two physical eraseblocks carrying the same logical
		 * eraseblock must never have the same (non-zero) sequence
		 * number.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err("two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(vid_hdr, av, pnum);
			if (err)
				return err;

			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * This logical eraseblock is older than the one found
			 * previously.
			 */
			return add_to_list(ai, pnum, vol_id, lnum, ec,
					   cmp_res & 4, &ai->erase);
		}
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */
	err = validate_vid_hdr(vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
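
/**
 * ubi_find_av - find volume in the attaching information.
 * @ai: attaching information
 * @vol_id: the requested volume ID
 *
 * This function returns a pointer to the volume description or %NULL if there
 * is no information about this volume in the attaching information.
 */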
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
				    int vol_id)
{
	struct ubi_ainf_volume *av;
	struct rb_node *p = ai->volumes.rb_node;

	while (p) {
		av = rb_entry(p, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return NULL;
}
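
/**
 * ubi_remove_av - delete attaching information about a volume.
 * @ai: attaching information
 * @av: the volume attaching information to delete
 *
 * All physical eraseblocks of the volume are moved to the @ai->erase list
 * before the volume object is freed.
 */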
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct rb_node *rb;
	struct ubi_ainf_peb *aeb;

	dbg_bld("remove attaching information about volume %d", av->vol_id);

	while ((rb = rb_first(&av->root))) {
		aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
		rb_erase(&aeb->u.rb, &av->root);
		list_add_tail(&aeb->u.list, &ai->erase);
	}

	rb_erase(&av->rb, &ai->volumes);
	kfree(av);
	ai->vols_found -= 1;
}
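
/**
 * early_erase_peb - erase a physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: physical eraseblock number to erase
 * @ec: erase counter value to write to the PEB
 *
 * This function erases physical eraseblock @pnum and writes the erase counter
 * header to it. It is used when UBI needs an eraseblock before the device has
 * been fully attached. Returns zero in case of success and a negative error
 * code in case of failure.
 */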
static int early_erase_peb(struct ubi_device *ubi,
			   const struct ubi_attach_info *ai, int pnum, int ec)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;

	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
		return -EINVAL;
	}

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_sync_erase(ubi, pnum, 0);
	if (err < 0)
		goto out_free;

	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);

out_free:
	kfree(ec_hdr);
	return err;
}
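
/**
 * ubi_early_get_peb - get a free physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns a free physical eraseblock. It is supposed to be
 * called during the UBI initialization stages, when the wear-leveling
 * sub-system is not initialized yet. It picks a PEB from the free list, or
 * erases one from the erase list, and removes it from the attaching
 * information. Returns a pointer to the "aeb" of the found free PEB in case
 * of success and an error pointer in case of failure.
 */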
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
				       struct ubi_attach_info *ai)
{
	int err = 0;
	struct ubi_ainf_peb *aeb, *tmp_aeb;

	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	/*
	 * The free list is empty, so try to erase PEBs from the erase list
	 * one by one and pick the first one which erases successfully.
	 */
	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
		if (err)
			continue;

		aeb->ec += 1;
		list_del(&aeb->u.list);
		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	ubi_err("no free eraseblocks");
	return ERR_PTR(-ENOSPC);
}
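
/**
 * check_corruption - check the data area of a PEB.
 * @ubi: UBI device description object
 * @vid_hdr: the (corrupted) VID header of this PEB
 * @pnum: the physical eraseblock number to check
 *
 * This helper is used to distinguish between VID header corruptions caused by
 * power cuts and other reasons. If the data area of the PEB contains only
 * 0xFF bytes, the VID header was most probably corrupted because of a power
 * cut (%0 is returned). Otherwise it was probably corrupted for some other
 * reason (%1 is returned). A negative error code is returned in case of a
 * read error.
 */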
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
			    int pnum)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf, 0x00, ubi->leb_size);

	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
			  ubi->leb_size);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or integrity errors while reading the data area.
		 * It is difficult to say for sure what kind of corruption
		 * this is, but presumably it is the result of a power cut
		 * during erasure, so just schedule this PEB for erasure.
		 */
		err = 0;
		goto out_unlock;
	}

	if (err)
		goto out_unlock;

	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
		goto out_unlock;

	ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
		pnum);
	ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
	ubi_dump_vid_hdr(vid_hdr);
	pr_err("hexdump of PEB %d offset %d, length %d",
	       pnum, ubi->leb_start, ubi->leb_size);
	ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       ubi->peb_buf, ubi->leb_size, 1);
	err = 1;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}
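
/**
 * scan_peb - scan and process UBI headers of a PEB.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @vid: the volume ID of the found volume will be stored here, if not %NULL
 * @sqnum: the sequence number of the found LEB will be stored here, if not %NULL
 *
 * This function reads the UBI headers of PEB @pnum, checks them, and adds
 * information about this PEB to the corresponding list or RB-tree in the
 * attaching information. Returns zero if the physical eraseblock was
 * successfully handled and a negative error code in case of failure.
 */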
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int pnum, int *vid, unsigned long long *sqnum)
{
	long long uninitialized_var(ec);
	int err, bitflips = 0, vol_id = -1, ec_err = 0;

	dbg_bld("scan PEB %d", pnum);

	/* Skip bad physical eraseblocks */
	err = ubi_io_is_bad(ubi, pnum);
	if (err < 0)
		return err;
	else if (err) {
		ai->bad_peb_count += 1;
		return 0;
	}

	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_FF:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 0, &ai->erase);
	case UBI_IO_FF_BITFLIPS:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 1, &ai->erase);
	case UBI_IO_BAD_HDR_EBADMSG:
	case UBI_IO_BAD_HDR:
		/*
		 * We have to also look at the VID header, possibly it is not
		 * corrupted. Set %bitflips flag in order to make this PEB be
		 * moved and the EC header be re-created.
		 */
		ec_err = err;
		ec = UBI_UNKNOWN;
		bitflips = 1;
		break;
	default:
		ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
		return -EINVAL;
	}

	if (!ec_err) {
		int image_seq;

		/* Make sure UBI version is OK */
		if (ech->version != UBI_VERSION) {
			ubi_err("this UBI version is %d, image version is %d",
				UBI_VERSION, (int)ech->version);
			return -EINVAL;
		}

		ec = be64_to_cpu(ech->ec);
		if (ec > UBI_MAX_ERASECOUNTER) {
			/*
			 * Erase counter overflow. The EC headers have 64 bits
			 * reserved, but UBI uses only 31-bit values
			 * internally, which is enough for any existing flash.
			 */
			ubi_err("erase counter overflow, max is %d",
				UBI_MAX_ERASECOUNTER);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}

		/*
		 * Make sure that all PEBs have the same image sequence
		 * number. This allows us to detect situations when users
		 * flash UBI images incorrectly, so that the flash contains
		 * both the new UBI image and leftovers from the old one.
		 */
		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		if (image_seq && ubi->image_seq != image_seq) {
			ubi_err("bad image sequence number %d in PEB %d, expected %d",
				image_seq, pnum, ubi->image_seq);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}
	}

	/* OK, we've done with the EC header, let's look at the VID header */
	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_BAD_HDR_EBADMSG:
		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
			/*
			 * Both EC and VID headers are corrupted and were read
			 * with data integrity errors. Probably this is a bad
			 * PEB which is not marked as bad yet, or the result
			 * of a power cut during erasure.
			 */
			ai->maybe_bad_peb_count += 1;
		/* fall through */
	case UBI_IO_BAD_HDR:
		if (ec_err)
			/*
			 * Both headers are corrupted. It is impossible to
			 * distinguish a valid but damaged UBI PEB from one
			 * containing garbage because of a power cut during
			 * erasure, so just schedule this PEB for erasure.
			 */
			err = 0;
		else
			/*
			 * The EC header was OK, but the VID header is
			 * corrupted. Check what is in the data area.
			 */
			err = check_corruption(ubi, vidh, pnum);

		if (err < 0)
			return err;
		else if (!err)
			/* This corruption is caused by a power cut */
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			/* This is an unexpected corruption */
			err = add_corrupted(ai, pnum, ec);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF_BITFLIPS:
		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				  ec, 1, &ai->erase);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF:
		if (ec_err || bitflips)
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 0, &ai->free);
		if (err)
			return err;
		goto adjust_mean_ec;
	default:
		ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	vol_id = be32_to_cpu(vidh->vol_id);
	if (vid)
		*vid = vol_id;
	if (sqnum)
		*sqnum = be64_to_cpu(vidh->sqnum);
	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
		int lnum = be32_to_cpu(vidh->lnum);

		/* Unsupported internal volume */
		switch (vidh->compat) {
		case UBI_COMPAT_DELETE:
			if (vol_id != UBI_FM_SB_VOLUME_ID
			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
					vol_id, lnum);
			}
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 1, &ai->erase);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_RO:
			ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
				vol_id, lnum);
			ubi->ro_mode = 1;
			break;

		case UBI_COMPAT_PRESERVE:
			ubi_msg("\"preserve\" compatible internal volume %d:%d found",
				vol_id, lnum);
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 0, &ai->alien);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_REJECT:
			ubi_err("incompatible internal volume %d:%d found",
				vol_id, lnum);
			return -EINVAL;
		}
	}

	if (ec_err)
		ubi_warn("valid VID header but corrupted EC header at PEB %d",
			 pnum);
	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
	if (err)
		return err;

adjust_mean_ec:
	if (!ec_err) {
		ai->ec_sum += ec;
		ai->ec_count += 1;
		if (ec > ai->max_ec)
			ai->max_ec = ec;
		if (ec < ai->min_ec)
			ai->min_ec = ec;
	}

	return 0;
}
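
/**
 * late_analysis - analyze the overall situation with PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This helper looks at what PEBs we have after gathering information about
 * all of them ("ai" is complete). It decides whether the flash is empty and
 * should be formatted, or whether there are too many corrupted PEBs and the
 * MTD device should not be attached. Returns zero if we should proceed with
 * attaching, and %-EINVAL if we should not.
 */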
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	int max_corr, peb_count;

	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
	max_corr = peb_count / 20 ?: 8;

	/*
	 * A few corrupted PEBs is not a problem and may just be the result
	 * of unclean reboots. However, many of them may indicate problems
	 * with the flash HW or driver.
	 */
	if (ai->corr_peb_count) {
		ubi_err("%d PEBs are corrupted and preserved",
			ai->corr_peb_count);
		pr_err("Corrupted PEBs are:");
		list_for_each_entry(aeb, &ai->corr, u.list)
			pr_cont(" %d", aeb->pnum);
		pr_cont("\n");

		/*
		 * If too many PEBs are corrupted, we refuse attaching,
		 * otherwise, only print a warning.
		 */
		if (ai->corr_peb_count >= max_corr) {
			ubi_err("too many corrupted PEBs, refusing");
			return -EINVAL;
		}
	}

	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
		/*
		 * All PEBs are empty, or almost all - a couple of PEBs look
		 * like they may be bad PEBs which were not marked as bad yet.
		 * If only a few PEBs could not be read, assume the flash is
		 * empty and go ahead and format it; otherwise refuse, because
		 * the device may contain non-UBI data.
		 */
		if (ai->maybe_bad_peb_count <= 2) {
			ai->is_empty = 1;
			ubi_msg("empty MTD device detected");
			get_random_bytes(&ubi->image_seq,
					 sizeof(ubi->image_seq));
		} else {
			ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
			return -EINVAL;
		}
	}

	return 0;
}
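
/**
 * destroy_av - free volume attaching information.
 * @ai: attaching information
 * @av: volume attaching information
 *
 * This function destroys the volume attaching information, freeing all PEB
 * objects of the volume's RB-tree.
 */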
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}
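
/**
 * destroy_ai - destroy attaching information.
 * @ai: attaching information
 */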
static void destroy_ai(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb, *aeb_tmp;
	struct ubi_ainf_volume *av;
	struct rb_node *rb;

	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}

	/* Destroy the volume RB-tree */
	rb = ai->volumes.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			av = rb_entry(rb, struct ubi_ainf_volume, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &av->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			destroy_av(ai, av);
		}
	}

	if (ai->aeb_slab_cache)
		kmem_cache_destroy(ai->aeb_slab_cache);

	kfree(ai);
}
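
/**
 * scan_all - scan entire MTD device.
 * @ubi: UBI device description object
 * @ai: attach info object
 * @start: start scanning at this PEB
 *
 * This function does full scanning of an MTD device and fills in the
 * attaching information @ai. Returns zero in case of success and a negative
 * error code in case of failure.
 */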
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int start)
{
	int err, pnum;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return err;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = start; pnum < ubi->peb_count; pnum++) {
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, NULL, NULL);
		if (err < 0)
			goto out_vidh;
	}

	ubi_msg("scanning is finished");

	/* Calculate mean erase counter */
	if (ai->ec_count)
		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);

	err = late_analysis(ubi, ai);
	if (err)
		goto out_vidh;

	/*
	 * In case of unknown erase counter we use the mean erase counter
	 * value.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			if (aeb->ec == UBI_UNKNOWN)
				aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->corr, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	list_for_each_entry(aeb, &ai->erase, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	err = self_check_ai(ubi, ai);
	if (err)
		goto out_vidh;

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	return 0;

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP

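/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info object
 *
 * Returns 0 on success, negative return values indicate an internal error.
 * %UBI_NO_FASTMAP denotes that no fastmap was found.
 */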
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, pnum, fm_anchor = -1;
	unsigned long long max_sqnum = 0;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		goto out;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		int vol_id = -1;
		unsigned long long sqnum = -1;
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
		if (err < 0)
			goto out_vidh;

		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
			max_sqnum = sqnum;
			fm_anchor = pnum;
		}
	}

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	return ubi_scan_fastmap(ubi, ai, fm_anchor);

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
out:
	return err;
}

#endif
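
/**
 * alloc_ai - allocate and initialize an attaching information object.
 * @slab_name: name to give the slab cache used for PEB objects
 *
 * Returns the new object, or %NULL if the allocation failed.
 */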
static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
	struct ubi_attach_info *ai;

	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
	if (!ai)
		return ai;

	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->aeb_slab_cache = kmem_cache_create(slab_name,
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		kfree(ai);
		ai = NULL;
	}

	return ai;
}
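
/**
 * ubi_attach - attach an MTD device.
 * @ubi: UBI device descriptor
 * @force_scan: if set to non-zero, attach by full scanning instead of using
 *              the fastmap
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */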
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
	int err;
	struct ubi_attach_info *ai;

	ai = alloc_ai("ubi_aeb_slab_cache");
	if (!ai)
		return -ENOMEM;

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* On small flash devices we disable fastmap in any case. */
	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
		ubi->fm_disabled = 1;
		force_scan = 1;
	}

	if (force_scan)
		err = scan_all(ubi, ai, 0);
	else {
		err = scan_fast(ubi, ai);
		if (err > 0) {
			if (err != UBI_NO_FASTMAP) {
				destroy_ai(ai);
				ai = alloc_ai("ubi_aeb_slab_cache2");
				if (!ai)
					return -ENOMEM;

				err = scan_all(ubi, ai, 0);
			} else {
				err = scan_all(ubi, ai, UBI_FM_MAX_START);
			}
		}
	}
#else
	err = scan_all(ubi, ai, 0);
#endif
	if (err)
		goto out_ai;

	ubi->bad_peb_count = ai->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = ai->corr_peb_count;
	ubi->max_ec = ai->max_ec;
	ubi->mean_ec = ai->mean_ec;
	dbg_gen("max. sequence number: %llu", ai->max_sqnum);

	err = ubi_read_volume_table(ubi, ai);
	if (err)
		goto out_ai;

	err = ubi_wl_init(ubi, ai);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init(ubi, ai);
	if (err)
		goto out_wl;

#ifdef CONFIG_MTD_UBI_FASTMAP
	if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
		struct ubi_attach_info *scan_ai;

		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
		if (!scan_ai) {
			err = -ENOMEM;
			goto out_wl;
		}

		err = scan_all(ubi, scan_ai, 0);
		if (err) {
			destroy_ai(scan_ai);
			goto out_wl;
		}

		err = self_check_eba(ubi, ai, scan_ai);
		destroy_ai(scan_ai);

		if (err)
			goto out_wl;
	}
#endif

	destroy_ai(ai);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_ai:
	destroy_ai(ai);
	return err;
}
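
/**
 * self_check_ai - check the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero if the attaching information is all right, and a
 * negative error code if not or if an error occurred.
 */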
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *last_aeb;
	uint8_t *buf;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	/* First, check that the attaching information itself is consistent */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		if (ai->is_empty) {
			ubi_err("bad is_empty flag");
			goto bad_av;
		}

		if (av->vol_id < 0 || av->highest_lnum < 0 ||
		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
		    av->data_pad < 0 || av->last_data_size < 0) {
			ubi_err("negative values");
			goto bad_av;
		}

		if (av->vol_id >= UBI_MAX_VOLUMES &&
		    av->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err("bad vol_id");
			goto bad_av;
		}

		if (av->vol_id > ai->highest_vol_id) {
			ubi_err("highest_vol_id is %d, but vol_id %d is there",
				ai->highest_vol_id, av->vol_id);
			goto out;
		}

		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
		    av->vol_type != UBI_STATIC_VOLUME) {
			ubi_err("bad vol_type");
			goto bad_av;
		}

		if (av->data_pad > ubi->leb_size / 2) {
			ubi_err("bad data_pad");
			goto bad_av;
		}

		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			last_aeb = aeb;
			leb_count += 1;

			if (aeb->pnum < 0 || aeb->ec < 0) {
				ubi_err("negative values");
				goto bad_aeb;
			}

			if (aeb->ec < ai->min_ec) {
				ubi_err("bad ai->min_ec (%d), %d found",
					ai->min_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->ec > ai->max_ec) {
				ubi_err("bad ai->max_ec (%d), %d found",
					ai->max_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->pnum >= ubi->peb_count) {
				ubi_err("too high PEB number %d, total PEBs %d",
					aeb->pnum, ubi->peb_count);
				goto bad_aeb;
			}

			if (av->vol_type == UBI_STATIC_VOLUME) {
				if (aeb->lnum >= av->used_ebs) {
					ubi_err("bad lnum or used_ebs");
					goto bad_aeb;
				}
			} else {
				if (av->used_ebs != 0) {
					ubi_err("non-zero used_ebs");
					goto bad_aeb;
				}
			}

			if (aeb->lnum > av->highest_lnum) {
				ubi_err("incorrect highest_lnum or lnum");
				goto bad_aeb;
			}
		}

		if (av->leb_count != leb_count) {
			ubi_err("bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_av;
		}

		if (!last_aeb)
			continue;

		aeb = last_aeb;

		if (aeb->lnum != av->highest_lnum) {
			ubi_err("bad highest_lnum");
			goto bad_aeb;
		}
	}

	if (vols_found != ai->vols_found) {
		ubi_err("bad ai->vols_found %d, should be %d",
			ai->vols_found, vols_found);
		goto out;
	}

	/* Second, check the attaching information against the VID headers */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			int vol_type;

			cond_resched();

			last_aeb = aeb;

			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("VID header is not OK (%d)", err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (av->vol_type != vol_type) {
				ubi_err("bad vol_type");
				goto bad_vid_hdr;
			}

			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err("bad sqnum %llu", aeb->sqnum);
				goto bad_vid_hdr;
			}

			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err("bad vol_id %d", av->vol_id);
				goto bad_vid_hdr;
			}

			if (av->compat != vidh->compat) {
				ubi_err("bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err("bad lnum %d", aeb->lnum);
				goto bad_vid_hdr;
			}

			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err("bad used_ebs %d", av->used_ebs);
				goto bad_vid_hdr;
			}

			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err("bad data_pad %d", av->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_aeb)
			continue;

		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err("bad highest_lnum %d", av->highest_lnum);
			goto bad_vid_hdr;
		}

		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err("bad last_data_size %d", av->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Make sure that all the physical eraseblocks are referred to in one
	 * of the lists or trees.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->free, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->corr, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->erase, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->alien, u.list)
		buf[aeb->pnum] = 1;

	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err("PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_aeb:
	ubi_err("bad attaching information about LEB %d", aeb->lnum);
	ubi_dump_aeb(aeb, 0);
	ubi_dump_av(av);
	goto out;

bad_av:
	ubi_err("bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	goto out;

bad_vid_hdr:
	ubi_err("bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	ubi_dump_vid_hdr(vidh);

out:
	dump_stack();
	return -EINVAL;
}