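/*
 * UBI attaching sub-system.
 *
 * This sub-system is responsible for attaching MTD devices: it scans the
 * physical eraseblocks, reads and checks the EC and VID headers, and builds
 * the attaching information (struct ubi_attach_info) which is later used to
 * initialize the volume table, the EBA and the wear-leveling sub-systems.
 */
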
#ifndef __UBOOT__
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/random.h>
#else
#include <div64.h>
#include <linux/err.h>
#endif

#include <linux/math64.h>

#include <ubi_uboot.h>
#include "ubi.h"

static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
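
/**
 * add_to_list - add physical eraseblock to a list.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @vol_id: the last used volume id for the PEB
 * @lnum: the last used LEB number for the PEB
 * @ec: erase counter of the physical eraseblock
 * @to_head: if not zero, add to the head of the list
 * @list: the list to add to
 *
 * This function allocates a 'struct ubi_ainf_peb' object for physical
 * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */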
static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
		       int lnum, int ec, int to_head, struct list_head *list)
{
	struct ubi_ainf_peb *aeb;

	if (list == &ai->free) {
		dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->erase) {
		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
	} else if (list == &ai->alien) {
		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
		ai->alien_peb_count += 1;
	} else
		BUG();

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->ec = ec;
	if (to_head)
		list_add(&aeb->u.list, list);
	else
		list_add_tail(&aeb->u.list, list);
	return 0;
}
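
/**
 * add_corrupted - add a corrupted physical eraseblock.
 * @ai: attaching information
 * @pnum: physical eraseblock number to add
 * @ec: erase counter of the physical eraseblock
 *
 * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
 * physical eraseblock @pnum and adds it to the 'corr' list. Returns zero in
 * case of success and a negative error code in case of failure.
 */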
static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
	struct ubi_ainf_peb *aeb;

	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	ai->corr_peb_count += 1;
	aeb->pnum = pnum;
	aeb->ec = ec;
	list_add(&aeb->u.list, &ai->corr);
	return 0;
}
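
/**
 * validate_vid_hdr - check volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 * @av: information about the volume this logical eraseblock belongs to
 * @pnum: physical eraseblock number the VID header came from
 *
 * This function checks that the data stored in @vid_hdr is consistent with
 * what is already known about the volume. Returns zero if the VID header is
 * OK and %-EINVAL if not.
 */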
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr,
			    const struct ubi_ainf_volume *av, int pnum)
{
	int vol_type = vid_hdr->vol_type;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);

	if (av->leb_count != 0) {
		int av_vol_type;

		/*
		 * This is not the first logical eraseblock belonging to this
		 * volume. Ensure that the data in its VID header is
		 * consistent with the data in previous logical eraseblock
		 * headers.
		 */
		if (vol_id != av->vol_id) {
			ubi_err(ubi, "inconsistent vol_id");
			goto bad;
		}

		if (av->vol_type == UBI_STATIC_VOLUME)
			av_vol_type = UBI_VID_STATIC;
		else
			av_vol_type = UBI_VID_DYNAMIC;

		if (vol_type != av_vol_type) {
			ubi_err(ubi, "inconsistent vol_type");
			goto bad;
		}

		if (used_ebs != av->used_ebs) {
			ubi_err(ubi, "inconsistent used_ebs");
			goto bad;
		}

		if (data_pad != av->data_pad) {
			ubi_err(ubi, "inconsistent data_pad");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	ubi_dump_av(av);
	return -EINVAL;
}
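
/**
 * add_volume - add volume to the attaching information.
 * @ai: attaching information
 * @vol_id: ID of the volume to add
 * @pnum: physical eraseblock number
 * @vid_hdr: volume identifier header
 *
 * If the volume corresponding to the @vid_hdr logical eraseblock is already
 * present in the attaching information, this function does nothing. Otherwise
 * it adds a corresponding volume to the attaching information. Returns a
 * pointer to the allocated "av" object in case of success and a negative
 * error code in case of failure.
 */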
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
					  int vol_id, int pnum,
					  const struct ubi_vid_hdr *vid_hdr)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));

	/* Walk the volume RB-tree to look if this volume is already present */
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* The volume is absent - add it */
	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		return ERR_PTR(-ENOMEM);

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->root = RB_ROOT;
	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
	av->compat = vid_hdr->compat;
	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
							    : UBI_STATIC_VOLUME;
	if (vol_id > ai->highest_vol_id)
		ai->highest_vol_id = vol_id;

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);
	ai->vols_found += 1;
	dbg_bld("added volume %d", vol_id);
	return av;
}
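
/**
 * ubi_compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @aeb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock to
 * compare
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and returns which one is newer:
 *   o bit 0 is cleared: the first PEB (described by @aeb) is newer;
 *   o bit 0 is set: the second PEB (described by @pnum and @vid_hdr) is newer;
 *   o bit 1 is set: bit-flips were detected in the newer LEB;
 *   o bit 2 is set: the older LEB is corrupted.
 *
 * A negative error code is returned in case of failure.
 */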
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
		     int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == aeb->sqnum) {
		/*
		 * Two LEB copies with the same sequence number: this must be
		 * a really ancient UBI image which used 32-bit LEB versions
		 * instead of sequence numbers. Such images are no longer
		 * supported.
		 */
		ubi_err(ubi, "unsupported on-flash UBI format");
		return -EINVAL;
	}

	/* Obviously the LEB with lower sequence counter is older */
	second_is_newer = (sqnum2 > aeb->sqnum);

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * the newer version is not set, just return; otherwise the data CRC
	 * has to be checked. For the second PEB we already have the VID
	 * header, for the first one it has to be re-read from flash.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!aeb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = aeb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
					pnum, err);
				if (err > 0)
					err = -EIO;

				goto out_free_vidh;
			}
		}

		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */

	len = be32_to_cpu(vid_hdr->data_size);

	mutex_lock(&ubi->buf_mutex);
	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_unlock;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips |= !!err;
	}
	mutex_unlock(&ubi->buf_mutex);

	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
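
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * 'used' tree of the corresponding volume. When this is not the first PEB
 * belonging to the same logical eraseblock, the newer copy is kept and the
 * older one is scheduled for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */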
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first occurrence of this logical eraseblock.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */
		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Make sure that the logical eraseblocks have different
		 * sequence numbers. Otherwise the image is bad.
		 *
		 * However, if the sequence number is zero, we assume it must
		 * be an ancient UBI image from the era when UBI did not have
		 * sequence numbers. We still can attach these images, unless
		 * there is a need to distinguish between old and new
		 * eraseblocks, in which case we refuse the image.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err(ubi, "two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
			if (err)
				return err;

			err = add_to_list(ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * This logical eraseblock is older than the one found
			 * previously.
			 */
			return add_to_list(ai, pnum, vol_id, lnum, ec,
					   cmp_res & 4, &ai->erase);
		}
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */
	err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
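
/**
 * ubi_find_av - find volume in the attaching information.
 * @ai: attaching information
 * @vol_id: the requested volume ID
 *
 * This function returns a pointer to the volume description or %NULL if there
 * are no data about this volume in the attaching information.
 */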
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
				    int vol_id)
{
	struct ubi_ainf_volume *av;
	struct rb_node *p = ai->volumes.rb_node;

	while (p) {
		av = rb_entry(p, struct ubi_ainf_volume, rb);

		if (vol_id == av->vol_id)
			return av;

		if (vol_id > av->vol_id)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return NULL;
}
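
/**
 * ubi_remove_av - delete attaching information about a volume.
 * @ai: attaching information
 * @av: the volume attaching information to delete
 */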
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct rb_node *rb;
	struct ubi_ainf_peb *aeb;

	dbg_bld("remove attaching information about volume %d", av->vol_id);

	while ((rb = rb_first(&av->root))) {
		aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
		rb_erase(&aeb->u.rb, &av->root);
		list_add_tail(&aeb->u.list, &ai->erase);
	}

	rb_erase(&av->rb, &ai->volumes);
	kfree(av);
	ai->vols_found -= 1;
}
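
/**
 * early_erase_peb - erase a physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: physical eraseblock number to erase
 * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
 *
 * This function erases physical eraseblock @pnum and writes the erase counter
 * header to it. It is only used during early UBI initialization, when the
 * wear-leveling sub-system is not initialized yet. Returns zero in case of
 * success and a negative error code in case of failure.
 */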
static int early_erase_peb(struct ubi_device *ubi,
			   const struct ubi_attach_info *ai, int pnum, int ec)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;

	if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit erase
		 * counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
			pnum, ec);
		return -EINVAL;
	}

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_sync_erase(ubi, pnum, 0);
	if (err < 0)
		goto out_free;

	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);

out_free:
	kfree(ec_hdr);
	return err;
}
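
/**
 * ubi_early_get_peb - get a free physical eraseblock.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns a free physical eraseblock. It is supposed to be
 * called on the UBI initialization stages when the wear-leveling sub-system is
 * not initialized yet. This function picks a physical eraseblock from one of
 * the lists, writes the EC header if needed, and removes it from the list.
 *
 * This function returns a pointer to the "aeb" of the found free PEB in case
 * of success and an error code in case of failure.
 */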
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
				       struct ubi_attach_info *ai)
{
	int err = 0;
	struct ubi_ainf_peb *aeb, *tmp_aeb;

	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	/*
	 * We try to erase the first physical eraseblock from the erase list
	 * and pick it if we succeed, or try to erase the next one if not.
	 * And so forth. We don't want to take care about bad eraseblocks
	 * here - they'll be handled later.
	 */
	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
		if (err)
			continue;

		aeb->ec += 1;
		list_del(&aeb->u.list);
		dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
		return aeb;
	}

	ubi_err(ubi, "no free eraseblocks");
	return ERR_PTR(-ENOSPC);
}
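
/**
 * check_corruption - check the data area of a PEB.
 * @ubi: UBI device description object
 * @vid_hdr: the (corrupted) VID header of this PEB
 * @pnum: the physical eraseblock number to check
 *
 * This is a helper function which is used to distinguish between VID header
 * corruptions caused by power cuts and other reasons. If the PEB contains
 * only 0xFF bytes in the data area, the VID header is most probably corrupted
 * because of a power cut (%0 is returned in this case). Otherwise, it was
 * probably corrupted for some other reason (%1 is returned in this case). A
 * negative error code is returned if a read error occurred.
 *
 * If the corruption reason was a power cut, UBI can safely erase this PEB.
 * Otherwise, it should preserve it to avoid possibly destroying important
 * information.
 */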
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
			    int pnum)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf, 0x00, ubi->leb_size);

	err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
			  ubi->leb_size);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or integrity errors while reading the data area.
		 * It is difficult to say for sure what type of corruption
		 * this is, but presumably a power cut happened while this PEB
		 * was erased, so it became unstable and corrupted, and should
		 * be erased.
		 */
		err = 0;
		goto out_unlock;
	}

	if (err)
		goto out_unlock;

	if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
		goto out_unlock;

	ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
		pnum);
	ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
	ubi_dump_vid_hdr(vid_hdr);
	pr_err("hexdump of PEB %d offset %d, length %d",
	       pnum, ubi->leb_start, ubi->leb_size);
	ubi_dbg_print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
			       ubi->peb_buf, ubi->leb_size, 1);
	err = 1;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	return err;
}
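
/**
 * scan_peb - scan and process UBI headers of a PEB.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @vid: the volume ID of the found volume will be stored in this pointer
 * @sqnum: the sqnum of the found volume will be stored in this pointer
 *
 * This function reads the UBI headers of PEB @pnum, checks them, and adds
 * information about this PEB to the corresponding list or RB-tree in the
 * "attaching info" structure. Returns zero if the physical eraseblock was
 * successfully handled and a negative error code in case of failure.
 */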
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int pnum, int *vid, unsigned long long *sqnum)
{
	long long uninitialized_var(ec);
	int err, bitflips = 0, vol_id = -1, ec_err = 0;

	dbg_bld("scan PEB %d", pnum);

	/* Skip bad physical eraseblocks */
	err = ubi_io_is_bad(ubi, pnum);
	if (err < 0)
		return err;
	else if (err) {
		ai->bad_peb_count += 1;
		return 0;
	}

	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_FF:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 0, &ai->erase);
	case UBI_IO_FF_BITFLIPS:
		ai->empty_peb_count += 1;
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				   UBI_UNKNOWN, 1, &ai->erase);
	case UBI_IO_BAD_HDR_EBADMSG:
	case UBI_IO_BAD_HDR:
		/*
		 * We have to also look at the VID header, possibly it is not
		 * corrupted. Set %bitflips flag in order to make this PEB be
		 * moved and the EC header be re-created.
		 */
		ec_err = err;
		ec = UBI_UNKNOWN;
		bitflips = 1;
		break;
	default:
		ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	if (!ec_err) {
		int image_seq;

		/* Make sure UBI version is OK */
		if (ech->version != UBI_VERSION) {
			ubi_err(ubi, "this UBI version is %d, image version is %d",
				UBI_VERSION, (int)ech->version);
			return -EINVAL;
		}

		ec = be64_to_cpu(ech->ec);
		if (ec > UBI_MAX_ERASECOUNTER) {
			/*
			 * Erase counter overflow. The EC headers have 64 bits
			 * reserved, but only 31-bit values are used, as this
			 * seems to be enough for any existing flash. Upgrade
			 * UBI and use 64-bit erase counters internally.
			 */
			ubi_err(ubi, "erase counter overflow, max is %d",
				UBI_MAX_ERASECOUNTER);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}

		/*
		 * Make sure that all PEBs have the same image sequence number.
		 * This allows us to detect situations when users flash UBI
		 * images incorrectly, so that the flash has the new UBI image
		 * and leftovers from the old one. Old UBI implementations
		 * always set the sequence number to zero, so PEBs with a zero
		 * sequence number are tolerated while other PEBs carry a
		 * non-zero one.
		 */
		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		if (image_seq && ubi->image_seq != image_seq) {
			ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
				image_seq, pnum, ubi->image_seq);
			ubi_dump_ec_hdr(ech);
			return -EINVAL;
		}
	}

	/* OK, we've done with the EC header, read the VID header */

	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
	if (err < 0)
		return err;
	switch (err) {
	case 0:
		break;
	case UBI_IO_BITFLIPS:
		bitflips = 1;
		break;
	case UBI_IO_BAD_HDR_EBADMSG:
		if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
			/*
			 * Both EC and VID headers are corrupted and were read
			 * with data integrity error, probably this is a bad
			 * PEB, but it is not marked as bad yet. This may also
			 * be a result of power cut during erasure.
			 */
			ai->maybe_bad_peb_count += 1;
		/* fall through */
	case UBI_IO_BAD_HDR:
		if (ec_err)
			/*
			 * Both headers are corrupted. There is a possibility
			 * that this is a valid UBI PEB which has a
			 * corresponding LEB, but the headers are corrupted.
			 * However, it is impossible to distinguish it from a
			 * PEB which just contains garbage because of a power
			 * cut during erase operation, so this PEB is simply
			 * scheduled for erasure.
			 */
			err = 0;
		else
			/*
			 * The EC was OK, but the VID header is corrupted. We
			 * have to check what is in the data area.
			 */
			err = check_corruption(ubi, vidh, pnum);

		if (err < 0)
			return err;
		else if (!err)
			/* This corruption is caused by a power cut */
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			/* This is an unexpected corruption */
			err = add_corrupted(ai, pnum, ec);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF_BITFLIPS:
		err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
				  ec, 1, &ai->erase);
		if (err)
			return err;
		goto adjust_mean_ec;
	case UBI_IO_FF:
		if (ec_err || bitflips)
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 1, &ai->erase);
		else
			err = add_to_list(ai, pnum, UBI_UNKNOWN,
					  UBI_UNKNOWN, ec, 0, &ai->free);
		if (err)
			return err;
		goto adjust_mean_ec;
	default:
		ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
			err);
		return -EINVAL;
	}

	vol_id = be32_to_cpu(vidh->vol_id);
	if (vid)
		*vid = vol_id;
	if (sqnum)
		*sqnum = be64_to_cpu(vidh->sqnum);
	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
		int lnum = be32_to_cpu(vidh->lnum);

		/* Unsupported internal volume */
		switch (vidh->compat) {
		case UBI_COMPAT_DELETE:
			if (vol_id != UBI_FM_SB_VOLUME_ID
			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
				ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
					vol_id, lnum);
			}
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 1, &ai->erase);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_RO:
			ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
				vol_id, lnum);
			ubi->ro_mode = 1;
			break;

		case UBI_COMPAT_PRESERVE:
			ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
				vol_id, lnum);
			err = add_to_list(ai, pnum, vol_id, lnum,
					  ec, 0, &ai->alien);
			if (err)
				return err;
			return 0;

		case UBI_COMPAT_REJECT:
			ubi_err(ubi, "incompatible internal volume %d:%d found",
				vol_id, lnum);
			return -EINVAL;
		}
	}

	if (ec_err)
		ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
			 pnum);
	err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
	if (err)
		return err;

adjust_mean_ec:
	if (!ec_err) {
		ai->ec_sum += ec;
		ai->ec_count += 1;
		if (ec > ai->max_ec)
			ai->max_ec = ec;
		if (ec < ai->min_ec)
			ai->min_ec = ec;
	}

	return 0;
}
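
/**
 * late_analysis - analyze the overall situation with PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function which takes a look at what PEBs we have after we
 * gathered information about all of them ("ai" is complete). It decides
 * whether the flash is empty and should be formatted, or whether there are
 * too many corrupted PEBs and we should not attach this MTD device. Returns
 * zero if we should proceed with attaching the MTD device, and %-EINVAL if
 * we should not.
 */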
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	int max_corr, peb_count;

	peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
	max_corr = peb_count / 20 ?: 8;

	/*
	 * A few corrupted PEBs is not a problem and may just be a result of
	 * unclean reboots. However, many of them may indicate some problems
	 * with the flash HW or driver.
	 */
	if (ai->corr_peb_count) {
		ubi_err(ubi, "%d PEBs are corrupted and preserved",
			ai->corr_peb_count);
		pr_err("Corrupted PEBs are:");
		list_for_each_entry(aeb, &ai->corr, u.list)
			pr_cont(" %d", aeb->pnum);
		pr_cont("\n");

		/*
		 * If too many PEBs are corrupted, we refuse attaching,
		 * otherwise, only print a warning.
		 */
		if (ai->corr_peb_count >= max_corr) {
			ubi_err(ubi, "too many corrupted PEBs, refusing");
			return -EINVAL;
		}
	}

	if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
		/*
		 * All PEBs are empty, or almost all - a couple of PEBs look
		 * like they may be bad PEBs which were not marked as bad yet.
		 *
		 * This piece of code basically tries to distinguish between
		 * the following situations:
		 *
		 * 1. Flash is empty, but there are a few bad PEBs, which are
		 *    not marked as bad so far, and which were read with error.
		 *    We want to go ahead and format this flash. While
		 *    formatting, the faulty PEBs will probably be marked as
		 *    bad.
		 *
		 * 2. Flash contains non-UBI data and we do not want to format
		 *    it and destroy possibly important information.
		 */
		if (ai->maybe_bad_peb_count <= 2) {
			ai->is_empty = 1;
			ubi_msg(ubi, "empty MTD device detected");
			get_random_bytes(&ubi->image_seq,
					 sizeof(ubi->image_seq));
		} else {
			ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
			return -EINVAL;
		}

	}

	return 0;
}
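
/**
 * destroy_av - free volume attaching information.
 * @ai: attaching information
 * @av: volume attaching information
 *
 * This function destroys the volume attaching information.
 */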
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}
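
/**
 * destroy_ai - destroy attaching information.
 * @ai: attaching information
 */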
static void destroy_ai(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb, *aeb_tmp;
	struct ubi_ainf_volume *av;
	struct rb_node *rb;

	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}
	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
		list_del(&aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, aeb);
	}

	/* Destroy the volumes RB-tree */
	rb = ai->volumes.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			av = rb_entry(rb, struct ubi_ainf_volume, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &av->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			destroy_av(ai, av);
		}
	}

	kmem_cache_destroy(ai->aeb_slab_cache);

	kfree(ai);
}
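
/**
 * scan_all - scan entire MTD device.
 * @ubi: UBI device description object
 * @ai: attach info object
 * @start: start scanning at this PEB
 *
 * This function does a full scan of the MTD device and returns complete
 * information about it in the form of a "struct ubi_attach_info" object. In
 * case of failure, an error code is returned.
 */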
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
		    int start)
{
	int err, pnum;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return err;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = start; pnum < ubi->peb_count; pnum++) {
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, NULL, NULL);
		if (err < 0)
			goto out_vidh;
	}

	ubi_msg(ubi, "scanning is finished");

	/* Calculate mean erase counter */
	if (ai->ec_count)
		ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);

	err = late_analysis(ubi, ai);
	if (err)
		goto out_vidh;

	/*
	 * In case of unknown erase counter we use the mean erase counter
	 * value.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			if (aeb->ec == UBI_UNKNOWN)
				aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;
	}

	list_for_each_entry(aeb, &ai->corr, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	list_for_each_entry(aeb, &ai->erase, u.list)
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

	err = self_check_ai(ubi, ai);
	if (err)
		goto out_vidh;

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	return 0;

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
	return err;
}
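
/**
 * alloc_ai - allocate attaching information.
 *
 * Helper which allocates and initializes a &struct ubi_attach_info object,
 * including the slab cache used for the per-PEB "aeb" objects. Returns the
 * new object or %NULL if the allocation failed.
 */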
static struct ubi_attach_info *alloc_ai(void)
{
	struct ubi_attach_info *ai;

	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
	if (!ai)
		return ai;

	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		kfree(ai);
		ai = NULL;
	}

	return ai;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
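
/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info object
 *
 * Returns 0 on success, negative return values indicate an internal
 * error.
 * %UBI_NO_FASTMAP denotes that no fastmap was found.
 */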
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
	int err, pnum, fm_anchor = -1;
	unsigned long long max_sqnum = 0;

	err = -ENOMEM;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		goto out;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		int vol_id = -1;
		unsigned long long sqnum = -1;
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
		if (err < 0)
			goto out_vidh;

		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
			max_sqnum = sqnum;
			fm_anchor = pnum;
		}
	}

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	destroy_ai(*ai);
	*ai = alloc_ai();
	if (!*ai)
		return -ENOMEM;

	return ubi_scan_fastmap(ubi, *ai, fm_anchor);

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
out:
	return err;
}

#endif
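
/**
 * ubi_attach - attach an MTD device.
 * @ubi: UBI device descriptor
 * @force_scan: if set to non-zero attach by scanning
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */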
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
	int err;
	struct ubi_attach_info *ai;

	ai = alloc_ai();
	if (!ai)
		return -ENOMEM;

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* On small flash devices we disable fastmap in any case. */
	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
		ubi->fm_disabled = 1;
		force_scan = 1;
	}

	if (force_scan)
		err = scan_all(ubi, ai, 0);
	else {
		err = scan_fast(ubi, &ai);
		if (err > 0 || mtd_is_eccerr(err)) {
			if (err != UBI_NO_FASTMAP) {
				destroy_ai(ai);
				ai = alloc_ai();
				if (!ai)
					return -ENOMEM;

				err = scan_all(ubi, ai, 0);
			} else {
				err = scan_all(ubi, ai, UBI_FM_MAX_START);
			}
		}
	}
#else
	err = scan_all(ubi, ai, 0);
#endif
	if (err)
		goto out_ai;

	ubi->bad_peb_count = ai->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = ai->corr_peb_count;
	ubi->max_ec = ai->max_ec;
	ubi->mean_ec = ai->mean_ec;
	dbg_gen("max. sequence number: %llu", ai->max_sqnum);

	err = ubi_read_volume_table(ubi, ai);
	if (err)
		goto out_ai;

	err = ubi_wl_init(ubi, ai);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init(ubi, ai);
	if (err)
		goto out_wl;

#ifdef CONFIG_MTD_UBI_FASTMAP
	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
		struct ubi_attach_info *scan_ai;

		scan_ai = alloc_ai();
		if (!scan_ai) {
			err = -ENOMEM;
			goto out_wl;
		}

		err = scan_all(ubi, scan_ai, 0);
		if (err) {
			destroy_ai(scan_ai);
			goto out_wl;
		}

		err = self_check_eba(ubi, ai, scan_ai);
		destroy_ai(scan_ai);

		if (err)
			goto out_wl;
	}
#endif

	destroy_ai(ai);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_ai:
	destroy_ai(ai);
	return err;
}
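
/**
 * self_check_ai - check the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero if the attaching information is all right, and a
 * negative error code if not or if an error occurred.
 */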
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *last_aeb;
	uint8_t *buf;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	/*
	 * At first, check that the attaching information is OK.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		if (ai->is_empty) {
			ubi_err(ubi, "bad is_empty flag");
			goto bad_av;
		}

		if (av->vol_id < 0 || av->highest_lnum < 0 ||
		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
		    av->data_pad < 0 || av->last_data_size < 0) {
			ubi_err(ubi, "negative values");
			goto bad_av;
		}

		if (av->vol_id >= UBI_MAX_VOLUMES &&
		    av->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err(ubi, "bad vol_id");
			goto bad_av;
		}

		if (av->vol_id > ai->highest_vol_id) {
			ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
				ai->highest_vol_id, av->vol_id);
			goto out;
		}

		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
		    av->vol_type != UBI_STATIC_VOLUME) {
			ubi_err(ubi, "bad vol_type");
			goto bad_av;
		}

		if (av->data_pad > ubi->leb_size / 2) {
			ubi_err(ubi, "bad data_pad");
			goto bad_av;
		}

		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			last_aeb = aeb;
			leb_count += 1;

			if (aeb->pnum < 0 || aeb->ec < 0) {
				ubi_err(ubi, "negative values");
				goto bad_aeb;
			}

			if (aeb->ec < ai->min_ec) {
				ubi_err(ubi, "bad ai->min_ec (%d), %d found",
					ai->min_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->ec > ai->max_ec) {
				ubi_err(ubi, "bad ai->max_ec (%d), %d found",
					ai->max_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->pnum >= ubi->peb_count) {
				ubi_err(ubi, "too high PEB number %d, total PEBs %d",
					aeb->pnum, ubi->peb_count);
				goto bad_aeb;
			}

			if (av->vol_type == UBI_STATIC_VOLUME) {
				if (aeb->lnum >= av->used_ebs) {
					ubi_err(ubi, "bad lnum or used_ebs");
					goto bad_aeb;
				}
			} else {
				if (av->used_ebs != 0) {
					ubi_err(ubi, "non-zero used_ebs");
					goto bad_aeb;
				}
			}

			if (aeb->lnum > av->highest_lnum) {
				ubi_err(ubi, "incorrect highest_lnum or lnum");
				goto bad_aeb;
			}
		}

		if (av->leb_count != leb_count) {
			ubi_err(ubi, "bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_av;
		}

		if (!last_aeb)
			continue;

		aeb = last_aeb;

		if (aeb->lnum != av->highest_lnum) {
			ubi_err(ubi, "bad highest_lnum");
			goto bad_aeb;
		}
	}

	if (vols_found != ai->vols_found) {
		ubi_err(ubi, "bad ai->vols_found %d, should be %d",
			ai->vols_found, vols_found);
		goto out;
	}

	/* Check that the attaching information is correct */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			int vol_type;

			cond_resched();

			last_aeb = aeb;

			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err(ubi, "VID header is not OK (%d)",
					err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (av->vol_type != vol_type) {
				ubi_err(ubi, "bad vol_type");
				goto bad_vid_hdr;
			}

			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
				goto bad_vid_hdr;
			}

			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err(ubi, "bad vol_id %d", av->vol_id);
				goto bad_vid_hdr;
			}

			if (av->compat != vidh->compat) {
				ubi_err(ubi, "bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err(ubi, "bad lnum %d", aeb->lnum);
				goto bad_vid_hdr;
			}

			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
				goto bad_vid_hdr;
			}

			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err(ubi, "bad data_pad %d", av->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_aeb)
			continue;

		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
			goto bad_vid_hdr;
		}

		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err(ubi, "bad last_data_size %d",
				av->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Make sure that all the physical eraseblocks are in one of the lists
	 * or trees.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->free, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->corr, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->erase, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->alien, u.list)
		buf[aeb->pnum] = 1;

	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err(ubi, "PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_aeb:
	ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
	ubi_dump_aeb(aeb, 0);
	ubi_dump_av(av);
	goto out;

bad_av:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	goto out;

bad_vid_hdr:
	ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	ubi_dump_vid_hdr(vidh);

out:
	dump_stack();
	return -EINVAL;
}