1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89#include <linux/crc32.h>
90#include <linux/err.h>
91#include <linux/slab.h>
92#include "ubi.h"
93
/*
 * Forward declarations of the self-check helpers implemented at the bottom
 * of this file. They are no-ops unless the 'chk_io' debugging flag is set.
 */
static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
			     const struct ubi_ec_hdr *ec_hdr);
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr);
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
			    int offset, int len);
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * Reads @len bytes from offset @offset of physical eraseblock @pnum and
 * stores the result in @buf. Non-bitflip read errors are retried up to
 * %UBI_IO_RETRIES times. Returns zero on success, %UBI_IO_BITFLIPS if all
 * read data were corrected by ECC, and a negative error code on failure.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the first byte of the caller's buffer before
	 * reading. If mtd_read() were to fail while leaving the buffer
	 * untouched, stale buffer contents could otherwise be mistaken for
	 * valid flash data.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * Correctable bit-flips: the data is intact, but the
			 * caller is informed so it can schedule scrubbing.
			 */
			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * A driver should never report an ECC error together with a
		 * short read, but buggy drivers may; normalize to -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		/* Debug facility: pretend a bit-flip happened. */
		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * Writes @len bytes of @buf to offset @offset of physical eraseblock @pnum.
 * @offset and @len must be aligned to the minimal I/O unit for headers.
 * Returns zero on success, %-EROFS in read-only mode, and other negative
 * error codes on failure. When io self-checks are enabled, the target region
 * is verified to be erased before the write and the written data is read
 * back and compared afterwards.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We are writing to the data area of the PEB, so both
		 * headers must already be present and valid.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	/* Debug facility: pretend the write failed. */
	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Self-check that the remainder of the PEB (after the region
		 * just written) still contains only 0xFF bytes.
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
309
310
311
312
313
314
315
316
317static void erase_callback(struct erase_info *ei)
318{
319 wake_up_interruptible((wait_queue_head_t *)ei->priv);
320}
321
322
323
324
325
326
327
328
329
330
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 *
 * Issues an asynchronous MTD erase and sleeps on a local wait-queue until
 * erase_callback() reports completion. Failures (either from mtd_erase()
 * itself or an %MTD_ERASE_FAILED completion state) are retried up to
 * %UBI_IO_RETRIES times. Returns zero on success, %-EROFS in read-only mode,
 * %-EINTR if the wait was interrupted, and other negative error codes on
 * failure.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

retry:
	/* Re-initialize on every attempt: mtd_erase() mutates 'ei'. */
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd = ubi->mtd;
	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;
	ei.callback = erase_callback;
	ei.priv = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	/* Wait for the erase to either complete or fail. */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		dump_stack();
		return -EIO;
	}

	/* Self-check (if enabled) that the PEB really is all 0xFF now. */
	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	/* Debug facility: pretend the erase failed. */
	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}
397
398
/* Byte patterns written to a PEB by torture_peb(); 0x00 is written last. */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
400
401
402
403
404
405
406
407
408
409
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * Repeatedly erases the PEB, verifies it reads back as all 0xFF, then writes
 * each test pattern and verifies it reads back intact. Returns a positive
 * number (the number of patterns that passed) if the PEB turned out to be
 * usable, %-EIO if it misbehaved and should be marked bad, and other
 * negative error codes on unrelated failures.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg("run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	/* ubi->peb_buf is shared; serialize with its other users. */
	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes after erasure */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err("erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write the test pattern and read it back */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/* Clobber the buffer first so a failed read cannot pass */
		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err("pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	err = patt_count;
	ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or ECC errors on a freshly erased PEB mean the
		 * eraseblock is unreliable: report it as bad.
		 */
		ubi_err("read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
/**
 * nor_erase_prepare - invalidate a NOR flash PEB before erasing it.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to invalidate
 *
 * Zeroes the first word of both the EC header and the VID header so that a
 * power cut during the (slow) NOR erase cannot leave behind a PEB whose
 * headers still look valid. Returns zero on success and a negative error
 * code on failure.
 */
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err, err1;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	/*
	 * A plain on-stack VID header is used here (no aligned/shifted
	 * allocation); NOTE(review): presumably acceptable because this path
	 * is NOR-only — confirm against ubi_io_read_vid_hdr() requirements.
	 */
	struct ubi_vid_hdr vid_hdr;

	/*
	 * Overwriting the first 4 bytes of each header breaks its magic/CRC,
	 * which is enough to make the PEB fail header validation.
	 */
	addr = (loff_t)pnum * ubi->peb_size;
	err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
	if (!err) {
		addr += ubi->vid_hdr_aloffset;
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (!err)
			return 0;
	}

	/*
	 * The invalidating write failed. The PEB may nevertheless already be
	 * unreadable (e.g. half-erased), in which case nothing more needs to
	 * be done: check whether either header still reads back as valid.
	 */
	err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
	    err1 == UBI_IO_FF) {
		struct ubi_ec_hdr ec_hdr;

		err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
		    err1 == UBI_IO_FF)
			/*
			 * Both headers are already bad or empty, so the PEB
			 * is effectively invalidated — treat as success.
			 */
			return 0;
	}

	/*
	 * A header is still readable and the invalidating writes failed:
	 * give up and report the PEB as bad.
	 */
	ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
		pnum, err, err1);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured first
 *
 * Optionally tortures the PEB (when it is suspected of being bad), then
 * erases it. Returns the number of erasures performed (greater than zero)
 * on success, %-EROFS in read-only mode, %-EIO if the PEB appears bad, and
 * other negative error codes on failure.
 */
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_not_bad(ubi, pnum);
	if (err != 0)
		return err;

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* NOR flash: invalidate headers so a power cut mid-erase is safe. */
	if (ubi->nor_flash) {
		err = nor_erase_prepare(ubi, pnum);
		if (err)
			return err;
	}

	if (torture) {
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	/* 'ret' counts the torture erasures; add the final erase. */
	return ret + 1;
}
609
610
611
612
613
614
615
616
617
618int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
619{
620 struct mtd_info *mtd = ubi->mtd;
621
622 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
623
624 if (ubi->bad_allowed) {
625 int ret;
626
627 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
628 if (ret < 0)
629 ubi_err("error %d while checking if PEB %d is bad",
630 ret, pnum);
631 else if (ret)
632 dbg_io("PEB %d is bad", pnum);
633 return ret;
634 }
635
636 return 0;
637}
638
639
640
641
642
643
644
645
646
647int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
648{
649 int err;
650 struct mtd_info *mtd = ubi->mtd;
651
652 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
653
654 if (ubi->ro_mode) {
655 ubi_err("read-only mode");
656 return -EROFS;
657 }
658
659 if (!ubi->bad_allowed)
660 return 0;
661
662 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
663 if (err)
664 ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
665 return err;
666}
667
668
669
670
671
672
673
674
675
676static int validate_ec_hdr(const struct ubi_device *ubi,
677 const struct ubi_ec_hdr *ec_hdr)
678{
679 long long ec;
680 int vid_hdr_offset, leb_start;
681
682 ec = be64_to_cpu(ec_hdr->ec);
683 vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
684 leb_start = be32_to_cpu(ec_hdr->data_offset);
685
686 if (ec_hdr->version != UBI_VERSION) {
687 ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
688 UBI_VERSION, (int)ec_hdr->version);
689 goto bad;
690 }
691
692 if (vid_hdr_offset != ubi->vid_hdr_offset) {
693 ubi_err("bad VID header offset %d, expected %d",
694 vid_hdr_offset, ubi->vid_hdr_offset);
695 goto bad;
696 }
697
698 if (leb_start != ubi->leb_start) {
699 ubi_err("bad data offset %d, expected %d",
700 leb_start, ubi->leb_start);
701 goto bad;
702 }
703
704 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
705 ubi_err("bad erase counter %lld", ec);
706 goto bad;
707 }
708
709 return 0;
710
711bad:
712 ubi_err("bad EC header");
713 ubi_dump_ec_hdr(ec_hdr);
714 dump_stack();
715 return 1;
716}
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object to store the read header in
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * Reads the erase counter header from PEB @pnum and checks it. Returns:
 * o %0 if the header is valid;
 * o %UBI_IO_BITFLIPS if it is valid but bit-flips were corrected;
 * o %UBI_IO_BAD_HDR if the magic or CRC is wrong;
 * o %UBI_IO_BAD_HDR_EBADMSG if additionally an ECC error was reported;
 * o %UBI_IO_FF / %UBI_IO_FF_BITFLIPS if the area contains only 0xFF bytes;
 * o a negative error code in case of failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * Bit-flips or uncorrectable ECC: the buffer may still hold
		 * a usable header, so continue and let the checks below
		 * decide what to report.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic is wrong. An all-0xFF region means the PEB is
		 * simply empty (e.g. freshly erased); anything else is
		 * treated as corruption.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */
			if (verbose)
				ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * Not a valid EC header and not all-0xFF: garbage or a
		 * foreign image.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);

		/* Distinguish plain corruption from ECC-error corruption */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* The header passed the CRC; now validate its contents. */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * A CRC-valid header after a bit-flip read means the flips were
	 * corrected — report them so the caller can schedule scrubbing.
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
850 struct ubi_ec_hdr *ec_hdr)
851{
852 int err;
853 uint32_t crc;
854
855 dbg_io("write EC header to PEB %d", pnum);
856 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
857
858 ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
859 ec_hdr->version = UBI_VERSION;
860 ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
861 ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
862 ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
863 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
864 ec_hdr->hdr_crc = cpu_to_be32(crc);
865
866 err = self_check_ec_hdr(ubi, pnum, ec_hdr);
867 if (err)
868 return err;
869
870 err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
871 return err;
872}
873
874
875
876
877
878
879
880
881
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * Checks that @vid_hdr is internally consistent and consistent with this
 * UBI device's geometry. Returns zero if the header is OK and %1 if not
 * (the bad header is dumped in that case).
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err("bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err("negative values");
		goto bad;
	}

	/* Volume IDs above the per-device maximum are reserved for
	 * internal volumes. */
	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err("bad vol_id");
		goto bad;
	}

	/* Only internal volumes may carry a compatibility flag. */
	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err("bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err("bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Static volumes: used_ebs and data_size are mandatory,
		 * every LEB but the last must be completely full, and lnum
		 * must not exceed the last LEB (used_ebs - 1).
		 */
		if (used_ebs == 0) {
			ubi_err("zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err("zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err("bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				ubi_err("bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err("too high lnum");
			goto bad;
		}
	} else {
		/*
		 * Dynamic volumes: data_size and data_crc are only
		 * meaningful when the LEB was produced by a copy operation
		 * (copy_flag set); used_ebs must always be zero.
		 */
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err("non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err("non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err("zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err("bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @vid_hdr: a &struct ubi_vid_hdr object to store the read header in
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * Reads the VID header from PEB @pnum and checks it. Returns the same set
 * of codes as ubi_io_read_ec_hdr(): %0, %UBI_IO_BITFLIPS,
 * %UBI_IO_BAD_HDR(_EBADMSG), %UBI_IO_FF(_BITFLIPS), or a negative error
 * code in case of failure.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_hdr *vid_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	void *p;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/* The header sits @vid_hdr_shift bytes into the aligned region. */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/* All 0xFF means there is no VID header (unmapped PEB). */
		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		/* Distinguish plain corruption from ECC-error corruption */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/* CRC-valid header after a bit-flip read: report the flips. */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1095 struct ubi_vid_hdr *vid_hdr)
1096{
1097 int err;
1098 uint32_t crc;
1099 void *p;
1100
1101 dbg_io("write VID header to PEB %d", pnum);
1102 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1103
1104 err = self_check_peb_ec_hdr(ubi, pnum);
1105 if (err)
1106 return err;
1107
1108 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1109 vid_hdr->version = UBI_VERSION;
1110 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1111 vid_hdr->hdr_crc = cpu_to_be32(crc);
1112
1113 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1114 if (err)
1115 return err;
1116
1117 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1118 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
1119 ubi->vid_hdr_alsize);
1120 return err;
1121}
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1132{
1133 int err;
1134
1135 if (!ubi->dbg->chk_io)
1136 return 0;
1137
1138 err = ubi_io_is_bad(ubi, pnum);
1139 if (!err)
1140 return err;
1141
1142 ubi_err("self-check failed for PEB %d", pnum);
1143 dump_stack();
1144 return err > 0 ? -EINVAL : err;
1145}
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1157 const struct ubi_ec_hdr *ec_hdr)
1158{
1159 int err;
1160 uint32_t magic;
1161
1162 if (!ubi->dbg->chk_io)
1163 return 0;
1164
1165 magic = be32_to_cpu(ec_hdr->magic);
1166 if (magic != UBI_EC_HDR_MAGIC) {
1167 ubi_err("bad magic %#08x, must be %#08x",
1168 magic, UBI_EC_HDR_MAGIC);
1169 goto fail;
1170 }
1171
1172 err = validate_ec_hdr(ubi, ec_hdr);
1173 if (err) {
1174 ubi_err("self-check failed for PEB %d", pnum);
1175 goto fail;
1176 }
1177
1178 return 0;
1179
1180fail:
1181 ubi_dump_ec_hdr(ec_hdr);
1182 dump_stack();
1183 return -EINVAL;
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * self_check_peb_ec_hdr - check that the EC header on a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * Reads the erase counter header back from flash, verifies its CRC and
 * validates its contents. Returns zero if the header is correct, %-EINVAL
 * if not, %-ENOMEM on allocation failure, and other negative error codes if
 * the read failed. No-op unless io self-checks are enabled.
 */
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi->dbg->chk_io)
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	/* Bit-flips and ECC errors are tolerated; the CRC decides. */
	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
		ubi_err("self-check failed for PEB %d", pnum);
		ubi_dump_ec_hdr(ec_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1239 const struct ubi_vid_hdr *vid_hdr)
1240{
1241 int err;
1242 uint32_t magic;
1243
1244 if (!ubi->dbg->chk_io)
1245 return 0;
1246
1247 magic = be32_to_cpu(vid_hdr->magic);
1248 if (magic != UBI_VID_HDR_MAGIC) {
1249 ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
1250 magic, pnum, UBI_VID_HDR_MAGIC);
1251 goto fail;
1252 }
1253
1254 err = validate_vid_hdr(ubi, vid_hdr);
1255 if (err) {
1256 ubi_err("self-check failed for PEB %d", pnum);
1257 goto fail;
1258 }
1259
1260 return err;
1261
1262fail:
1263 ubi_err("self-check failed for PEB %d", pnum);
1264 ubi_dump_vid_hdr(vid_hdr);
1265 dump_stack();
1266 return -EINVAL;
1267
1268}
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1279{
1280 int err;
1281 uint32_t crc, hdr_crc;
1282 struct ubi_vid_hdr *vid_hdr;
1283 void *p;
1284
1285 if (!ubi->dbg->chk_io)
1286 return 0;
1287
1288 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1289 if (!vid_hdr)
1290 return -ENOMEM;
1291
1292 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1293 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1294 ubi->vid_hdr_alsize);
1295 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1296 goto exit;
1297
1298 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1299 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1300 if (hdr_crc != crc) {
1301 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1302 pnum, crc, hdr_crc);
1303 ubi_err("self-check failed for PEB %d", pnum);
1304 ubi_dump_vid_hdr(vid_hdr);
1305 dump_stack();
1306 err = -EINVAL;
1307 goto exit;
1308 }
1309
1310 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1311
1312exit:
1313 ubi_free_vid_hdr(ubi, vid_hdr);
1314 return err;
1315}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1330 int offset, int len)
1331{
1332 int err, i;
1333 size_t read;
1334 void *buf1;
1335 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1336
1337 if (!ubi->dbg->chk_io)
1338 return 0;
1339
1340 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1341 if (!buf1) {
1342 ubi_err("cannot allocate memory to check writes");
1343 return 0;
1344 }
1345
1346 err = mtd_read(ubi->mtd, addr, len, &read, buf1);
1347 if (err && !mtd_is_bitflip(err))
1348 goto out_free;
1349
1350 for (i = 0; i < len; i++) {
1351 uint8_t c = ((uint8_t *)buf)[i];
1352 uint8_t c1 = ((uint8_t *)buf1)[i];
1353 int dump_len;
1354
1355 if (c == c1)
1356 continue;
1357
1358 ubi_err("self-check failed for PEB %d:%d, len %d",
1359 pnum, offset, len);
1360 ubi_msg("data differ at position %d", i);
1361 dump_len = max_t(int, 128, len - i);
1362 ubi_msg("hex dump of the original buffer from %d to %d",
1363 i, i + dump_len);
1364 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1365 buf + i, dump_len, 1);
1366 ubi_msg("hex dump of the read buffer from %d to %d",
1367 i, i + dump_len);
1368 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1369 buf1 + i, dump_len, 1);
1370 dump_stack();
1371 err = -EINVAL;
1372 goto out_free;
1373 }
1374
1375 vfree(buf1);
1376 return 0;
1377
1378out_free:
1379 vfree(buf1);
1380 return err;
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
/**
 * ubi_self_check_all_ff - check that a region of flash is empty.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @offset: the starting offset within the physical eraseblock to check
 * @len: the length of the region to check
 *
 * Makes sure the region contains only 0xFF bytes. Returns zero if so,
 * %-EINVAL if not (the offending region is dumped), and another negative
 * error code if the read failed. No-op unless io self-checks are enabled.
 */
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
	size_t read;
	int err;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi->dbg->chk_io)
		return 0;

	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubi_err("cannot allocate memory to check for 0xFFs");
		/* Cannot verify; do not fail the caller's operation. */
		return 0;
	}

	/* Correctable bit-flips are tolerated; the pattern check decides. */
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err && !mtd_is_bitflip(err)) {
		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, len, pnum, offset, read);
		goto error;
	}

	err = ubi_check_pattern(buf, 0xFF, len);
	if (err == 0) {
		ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
			pnum, offset, len);
		goto fail;
	}

	vfree(buf);
	return 0;

fail:
	ubi_err("self-check failed for PEB %d", pnum);
	ubi_msg("hex dump of the %d-%d region", offset, offset + len);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
	err = -EINVAL;
error:
	dump_stack();
	vfree(buf);
	return err;
}
1437