1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89#include <linux/crc32.h>
90#include <linux/err.h>
91#include <linux/slab.h>
92#include "ubi.h"
93
94static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
95static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
96static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
97 const struct ubi_ec_hdr *ec_hdr);
98static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
99static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
100 const struct ubi_vid_hdr *vid_hdr);
101static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
102 int offset, int len);
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads @len bytes of data from offset @offset of physical
 * eraseblock @pnum. Failing reads are retried up to %UBI_IO_RETRIES times.
 * Returns:
 * o %0 in case of success;
 * o %UBI_IO_BITFLIPS if the driver corrected bit-flips while reading (the
 *   data is still valid, but the caller may want to scrub this PEB), or if
 *   a bit-flip was emulated for debugging;
 * o a negative error code in case of failure.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the first byte of the caller's buffer before
	 * reading. If the MTD driver fails without touching @buf and the
	 * caller were to ignore the error, the buffer would otherwise still
	 * hold stale data that could be mistaken for successfully read data.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * Bit-flips were detected and corrected by the MTD
			 * driver, so the data is valid. Report
			 * %UBI_IO_BITFLIPS and let the caller decide whether
			 * the PEB should be scrubbed.
			 */
			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * A driver should never report an ECC error without having
		 * returned all the requested data. Some buggy drivers do,
		 * so turn such results into a plain -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		/* Debugging hook: pretend a bit-flip occurred */
		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * Writes @len bytes of @buf to offset @offset of physical eraseblock @pnum.
 * Both @offset and @len must be aligned to @ubi->hdrs_min_io_size. When the
 * I/O debugging checks are enabled, the target region is verified to be all
 * 0xFF before the write and the written data is read back and compared
 * afterwards. Returns zero on success, -EROFS in read-only mode, and a
 * negative error code in case of failure.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are about to write must contain only 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We are writing to the data area of the PEB, so it must
		 * already carry valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	/* Debugging hook: pretend the write failed */
	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		/* Read back and compare what was just written */
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Writes within a PEB are sequential, so everything after
		 * the region we just wrote must still be erased (0xFF).
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
309
310
311
312
313
314
315
316
317static void erase_callback(struct erase_info *ei)
318{
319 wake_up_interruptible((wait_queue_head_t *)ei->priv);
320}
321
322
323
324
325
326
327
328
329
330
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * Issues an MTD erase request and sleeps on a local wait queue until
 * erase_callback() reports completion (%MTD_ERASE_DONE or
 * %MTD_ERASE_FAILED). Failed erasures are retried up to %UBI_IO_RETRIES
 * times. Returns zero on success, -EROFS in read-only mode, -EINTR if the
 * wait was interrupted, and -EIO or another negative error code on failure.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

retry:
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd = ubi->mtd;
	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;
	ei.callback = erase_callback;
	/* Smuggle the wait queue to the call-back through ei.priv */
	ei.priv = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	/* Sleep until the driver signals completion or failure */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		dump_stack();
		return -EIO;
	}

	/* A freshly erased PEB must contain only 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	/* Debugging hook: pretend the erasure failed */
	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}
397
398
/* Byte patterns written by torture_peb() to exercise a suspect eraseblock */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
400
401
402
403
404
405
406
407
408
409
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * Repeatedly erases @pnum, checks it reads back as all 0xFF, then writes
 * and verifies each test pattern in @patterns. Returns the number of
 * patterns (a positive value) if the PEB survived the test, and a negative
 * error code otherwise. In particular, -EIO is returned if any verification
 * failed or if read problems (bit-flips/ECC errors) were seen on the
 * freshly erased PEB, meaning the PEB should be marked bad.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg("run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	/* ubi->peb_buf is shared - serialize access to it */
	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes after erasure */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/* ubi_check_pattern() returns 0 when a mismatching byte exists */
		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err("erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write the test pattern and read it back for verification */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/* Pre-fill with the inverse so a failed read cannot pass */
		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err("pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	/* Success is reported as the (positive) number of patterns tested */
	err = patt_count;
	ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or data integrity errors while reading a freshly
		 * erased PEB mean the PEB is unreliable even though the
		 * erase/write cycle itself succeeded - report -EIO so the
		 * caller marks it bad.
		 */
		ubi_err("read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
/**
 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * Invalidates the EC and VID headers of PEB @pnum by zeroing the first 4
 * bytes of each (NOR flash allows clearing bits in place) before the slow
 * erase operation starts. NOTE(review): presumably this guards against an
 * interrupted erasure leaving behind headers that still look valid on the
 * next attach - confirm against ubi_io_sync_erase(), which calls this only
 * for NOR flash. Returns zero on success and -EIO on failure.
 */
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	struct ubi_ec_hdr ec_hdr;

	/*
	 * NOTE(review): unlike the rest of this file, which reads VID
	 * headers via buffers from ubi_zalloc_vid_hdr() (see the
	 * vid_hdr_shift arithmetic in ubi_io_read_vid_hdr()), this VID
	 * header lives on the stack. That is presumably safe only for the
	 * NOR-specific geometry this function runs under - confirm, since
	 * ubi_io_read_vid_hdr() reads vid_hdr_alsize bytes starting
	 * vid_hdr_shift bytes before the structure.
	 */
	struct ubi_vid_hdr vid_hdr;

	/*
	 * Only zero the headers if they currently read as valid; a header
	 * that is already bad, unreadable, or all-0xFF needs no
	 * invalidation (and writing to it could needlessly fail).
	 */
	addr = (loff_t)pnum * ubi->peb_size;
	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF){
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if(err)
			goto error;
	}

	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF){
		addr += ubi->vid_hdr_aloffset;
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}
	return 0;

error:
	/*
	 * The PEB contains a valid header we failed to invalidate; the
	 * flash or driver is misbehaving, so report the PEB as bad.
	 */
	ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
567{
568 int err, ret = 0;
569
570 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
571
572 err = self_check_not_bad(ubi, pnum);
573 if (err != 0)
574 return err;
575
576 if (ubi->ro_mode) {
577 ubi_err("read-only mode");
578 return -EROFS;
579 }
580
581 if (ubi->nor_flash) {
582 err = nor_erase_prepare(ubi, pnum);
583 if (err)
584 return err;
585 }
586
587 if (torture) {
588 ret = torture_peb(ubi, pnum);
589 if (ret < 0)
590 return ret;
591 }
592
593 err = do_sync_erase(ubi, pnum);
594 if (err)
595 return err;
596
597 return ret + 1;
598}
599
600
601
602
603
604
605
606
607
608int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
609{
610 struct mtd_info *mtd = ubi->mtd;
611
612 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
613
614 if (ubi->bad_allowed) {
615 int ret;
616
617 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
618 if (ret < 0)
619 ubi_err("error %d while checking if PEB %d is bad",
620 ret, pnum);
621 else if (ret)
622 dbg_io("PEB %d is bad", pnum);
623 return ret;
624 }
625
626 return 0;
627}
628
629
630
631
632
633
634
635
636
637int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
638{
639 int err;
640 struct mtd_info *mtd = ubi->mtd;
641
642 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
643
644 if (ubi->ro_mode) {
645 ubi_err("read-only mode");
646 return -EROFS;
647 }
648
649 if (!ubi->bad_allowed)
650 return 0;
651
652 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
653 if (err)
654 ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
655 return err;
656}
657
658
659
660
661
662
663
664
665
666static int validate_ec_hdr(const struct ubi_device *ubi,
667 const struct ubi_ec_hdr *ec_hdr)
668{
669 long long ec;
670 int vid_hdr_offset, leb_start;
671
672 ec = be64_to_cpu(ec_hdr->ec);
673 vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
674 leb_start = be32_to_cpu(ec_hdr->data_offset);
675
676 if (ec_hdr->version != UBI_VERSION) {
677 ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
678 UBI_VERSION, (int)ec_hdr->version);
679 goto bad;
680 }
681
682 if (vid_hdr_offset != ubi->vid_hdr_offset) {
683 ubi_err("bad VID header offset %d, expected %d",
684 vid_hdr_offset, ubi->vid_hdr_offset);
685 goto bad;
686 }
687
688 if (leb_start != ubi->leb_start) {
689 ubi_err("bad data offset %d, expected %d",
690 leb_start, ubi->leb_start);
691 goto bad;
692 }
693
694 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
695 ubi_err("bad erase counter %lld", ec);
696 goto bad;
697 }
698
699 return 0;
700
701bad:
702 ubi_err("bad EC header");
703 ubi_dump_ec_hdr(ec_hdr);
704 dump_stack();
705 return 1;
706}
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read the header from
 * @ec_hdr: buffer where to store the read erase counter header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * Reads the erase counter header of PEB @pnum and checks its magic, CRC
 * and contents. Returns:
 * o %0 if the header is valid;
 * o %UBI_IO_BITFLIPS if the header is valid but bit-flips were corrected
 *   while reading;
 * o %UBI_IO_FF if the header region contains only 0xFF bytes (erased PEB);
 * o %UBI_IO_FF_BITFLIPS - the same, but bit-flips were corrected;
 * o %UBI_IO_BAD_HDR if the magic or CRC is wrong;
 * o %UBI_IO_BAD_HDR_EBADMSG - the same, but an uncorrectable ECC error was
 *   also reported while reading;
 * o -EINVAL if the header passed the CRC check but failed validation;
 * o other negative error codes on read failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or an uncorrectable ECC error was reported.
		 * Continue and let the checks below decide - the buffer may
		 * still contain a usable header, and read_err is consulted
		 * again when classifying the result.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		/* With an ECC error and a bad magic, the data is garbage */
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic is wrong. Distinguish an erased (all-0xFF)
		 * header region from genuine corruption.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The EC header region is empty (erased PEB) */
			if (verbose)
				ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * This is not a valid erase counter header, and these are
		 * not 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);

		/* A CRC mismatch plus an ECC error gets its own code */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* The CRC is OK - now validate the header's contents */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * If we reached here with a non-zero read_err, it was a corrected
	 * bit-flip (an ECC error with a good CRC is not possible here,
	 * since mtd_is_eccerr() reads returned early on a bad magic).
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
840 struct ubi_ec_hdr *ec_hdr)
841{
842 int err;
843 uint32_t crc;
844
845 dbg_io("write EC header to PEB %d", pnum);
846 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
847
848 ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
849 ec_hdr->version = UBI_VERSION;
850 ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
851 ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
852 ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
853 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
854 ec_hdr->hdr_crc = cpu_to_be32(crc);
855
856 err = self_check_ec_hdr(ubi, pnum, ec_hdr);
857 if (err)
858 return err;
859
860 err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
861 return err;
862}
863
864
865
866
867
868
869
870
871
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to validate
 *
 * Checks the header's fields for range and mutual consistency (the
 * requirements differ between static and dynamic volumes). Returns zero if
 * the header is valid and %1 otherwise (dumping the header for debugging).
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err("bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err("negative values");
		goto bad;
	}

	/* Volume IDs must be user-range or internal-range, nothing between */
	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err("bad vol_id");
		goto bad;
	}

	/* Only internal volumes carry a compatibility flag */
	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err("bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err("bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Static volumes record their total size, so used_ebs and
		 * data_size are mandatory, and data_size must equal the full
		 * usable LEB size everywhere except possibly the last LEB.
		 */
		if (used_ebs == 0) {
			ubi_err("zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err("zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err("bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				ubi_err("bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err("too high lnum");
			goto bad;
		}
	} else {
		/*
		 * Dynamic volumes: data_size and data_crc are meaningful
		 * only for copied LEBs (copy_flag set); used_ebs is unused
		 * and must be zero.
		 */
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err("non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err("non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err("zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err("bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read the header from
 * @vid_hdr: buffer where to store the read VID header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * Reads the VID header of PEB @pnum and checks its magic, CRC and
 * contents. Return codes mirror ubi_io_read_ec_hdr(): %0,
 * %UBI_IO_BITFLIPS, %UBI_IO_FF, %UBI_IO_FF_BITFLIPS, %UBI_IO_BAD_HDR,
 * %UBI_IO_BAD_HDR_EBADMSG, -EINVAL on failed validation, or another
 * negative error code on read failure.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_hdr *vid_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	void *p;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/*
	 * The raw flash region starts vid_hdr_shift bytes before the
	 * structure (see ubi_zalloc_vid_hdr()); read the whole aligned
	 * region so that @vid_hdr lands at the right place.
	 */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		/* With an ECC error and a bad magic, the data is garbage */
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/* Distinguish an erased header region from corruption */
		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* The CRC is OK - now validate the header's contents */
	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	return read_err ? UBI_IO_BITFLIPS : 0;
}
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1085 struct ubi_vid_hdr *vid_hdr)
1086{
1087 int err;
1088 uint32_t crc;
1089 void *p;
1090
1091 dbg_io("write VID header to PEB %d", pnum);
1092 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1093
1094 err = self_check_peb_ec_hdr(ubi, pnum);
1095 if (err)
1096 return err;
1097
1098 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1099 vid_hdr->version = UBI_VERSION;
1100 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1101 vid_hdr->hdr_crc = cpu_to_be32(crc);
1102
1103 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1104 if (err)
1105 return err;
1106
1107 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1108 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
1109 ubi->vid_hdr_alsize);
1110 return err;
1111}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1122{
1123 int err;
1124
1125 if (!ubi_dbg_chk_io(ubi))
1126 return 0;
1127
1128 err = ubi_io_is_bad(ubi, pnum);
1129 if (!err)
1130 return err;
1131
1132 ubi_err("self-check failed for PEB %d", pnum);
1133 dump_stack();
1134 return err > 0 ? -EINVAL : err;
1135}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1147 const struct ubi_ec_hdr *ec_hdr)
1148{
1149 int err;
1150 uint32_t magic;
1151
1152 if (!ubi_dbg_chk_io(ubi))
1153 return 0;
1154
1155 magic = be32_to_cpu(ec_hdr->magic);
1156 if (magic != UBI_EC_HDR_MAGIC) {
1157 ubi_err("bad magic %#08x, must be %#08x",
1158 magic, UBI_EC_HDR_MAGIC);
1159 goto fail;
1160 }
1161
1162 err = validate_ec_hdr(ubi, ec_hdr);
1163 if (err) {
1164 ubi_err("self-check failed for PEB %d", pnum);
1165 goto fail;
1166 }
1167
1168 return 0;
1169
1170fail:
1171 ubi_dump_ec_hdr(ec_hdr);
1172 dump_stack();
1173 return -EINVAL;
1174}
1175
1176
1177
1178
1179
1180
1181
1182
1183
/**
 * self_check_peb_ec_hdr - check the erase counter header stored on a PEB.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to check
 *
 * Debug helper: reads the EC header back from flash, verifies its CRC and
 * contents. Returns zero if the on-flash header is correct (or I/O debug
 * checks are disabled), -ENOMEM if a buffer could not be allocated, and
 * -EINVAL or another negative error code otherwise.
 */
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	/* Corrected bit-flips and ECC errors are tolerated here */
	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
		ubi_err("self-check failed for PEB %d", pnum);
		ubi_dump_ec_hdr(ec_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
/**
 * self_check_vid_hdr - check an in-memory volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the header belongs to
 * @vid_hdr: the volume identifier header to check
 *
 * Debug helper: returns zero if the header looks correct (or I/O debug
 * checks are disabled) and -EINVAL otherwise.
 */
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr)
{
	int err;
	uint32_t magic;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
			magic, pnum, UBI_VID_HDR_MAGIC);
		goto fail;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		/*
		 * Note: on validation failure this message is printed twice
		 * (here and at the fail label below).
		 */
		ubi_err("self-check failed for PEB %d", pnum);
		goto fail;
	}

	/* err is zero here */
	return err;

fail:
	ubi_err("self-check failed for PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return -EINVAL;

}
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1269{
1270 int err;
1271 uint32_t crc, hdr_crc;
1272 struct ubi_vid_hdr *vid_hdr;
1273 void *p;
1274
1275 if (!ubi_dbg_chk_io(ubi))
1276 return 0;
1277
1278 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1279 if (!vid_hdr)
1280 return -ENOMEM;
1281
1282 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1283 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1284 ubi->vid_hdr_alsize);
1285 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1286 goto exit;
1287
1288 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1289 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1290 if (hdr_crc != crc) {
1291 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1292 pnum, crc, hdr_crc);
1293 ubi_err("self-check failed for PEB %d", pnum);
1294 ubi_dump_vid_hdr(vid_hdr);
1295 dump_stack();
1296 err = -EINVAL;
1297 goto exit;
1298 }
1299
1300 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1301
1302exit:
1303 ubi_free_vid_hdr(ubi, vid_hdr);
1304 return err;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1320 int offset, int len)
1321{
1322 int err, i;
1323 size_t read;
1324 void *buf1;
1325 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1326
1327 if (!ubi_dbg_chk_io(ubi))
1328 return 0;
1329
1330 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1331 if (!buf1) {
1332 ubi_err("cannot allocate memory to check writes");
1333 return 0;
1334 }
1335
1336 err = mtd_read(ubi->mtd, addr, len, &read, buf1);
1337 if (err && !mtd_is_bitflip(err))
1338 goto out_free;
1339
1340 for (i = 0; i < len; i++) {
1341 uint8_t c = ((uint8_t *)buf)[i];
1342 uint8_t c1 = ((uint8_t *)buf1)[i];
1343 int dump_len;
1344
1345 if (c == c1)
1346 continue;
1347
1348 ubi_err("self-check failed for PEB %d:%d, len %d",
1349 pnum, offset, len);
1350 ubi_msg("data differ at position %d", i);
1351 dump_len = max_t(int, 128, len - i);
1352 ubi_msg("hex dump of the original buffer from %d to %d",
1353 i, i + dump_len);
1354 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1355 buf + i, dump_len, 1);
1356 ubi_msg("hex dump of the read buffer from %d to %d",
1357 i, i + dump_len);
1358 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1359 buf1 + i, dump_len, 1);
1360 dump_stack();
1361 err = -EINVAL;
1362 goto out_free;
1363 }
1364
1365 vfree(buf1);
1366 return 0;
1367
1368out_free:
1369 vfree(buf1);
1370 return err;
1371}
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
/**
 * ubi_self_check_all_ff - check that a flash region contains only 0xFFs.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to check
 * @offset: offset within the physical eraseblock to start checking at
 * @len: how many bytes to check
 *
 * Debug helper: returns zero if the region is all 0xFF (or I/O debug
 * checks are disabled, or the buffer could not be allocated - the check is
 * best-effort), and -EINVAL or a read error code otherwise.
 */
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
	size_t read;
	int err;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		/* Best-effort check: skip verification rather than fail */
		ubi_err("cannot allocate memory to check for 0xFFs");
		return 0;
	}

	/* Corrected bit-flips are tolerated; the data is still valid */
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err && !mtd_is_bitflip(err)) {
		ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, len, pnum, offset, read);
		goto error;
	}

	/* ubi_check_pattern() returns 0 when a mismatching byte exists */
	err = ubi_check_pattern(buf, 0xFF, len);
	if (err == 0) {
		ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
			pnum, offset, len);
		goto fail;
	}

	vfree(buf);
	return 0;

fail:
	ubi_err("self-check failed for PEB %d", pnum);
	ubi_msg("hex dump of the %d-%d region", offset, offset + len);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
	err = -EINVAL;
error:
	dump_stack();
	vfree(buf);
	return err;
}
1427