1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76#ifndef __UBOOT__
77#include <log.h>
78#include <dm/devres.h>
79#include <linux/crc32.h>
80#include <linux/err.h>
81#include <linux/slab.h>
82#include <u-boot/crc.h>
83#else
84#include <hexdump.h>
85#include <ubi_uboot.h>
86#endif
87
88#include "ubi.h"
89
90static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
91static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
92static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
93 const struct ubi_ec_hdr *ec_hdr);
94static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
95static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
96 const struct ubi_vid_hdr *vid_hdr);
97static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
98 int offset, int len);
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock
 * @pnum and stores the read data in the @buf buffer. Returns:
 * o %0 on success,
 * o %UBI_IO_BITFLIPS if all the requested data were read, but there were
 *   correctable bit-flips (real or emulated by the debugging code),
 * o a negative error code in case of failure. Hard read errors are retried
 *   up to %UBI_IO_RETRIES times before giving up.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately invert the first byte of the caller's buffer before
	 * reading. If a buggy MTD driver were to report success without
	 * actually filling @buf, the stale contents could be mistaken for
	 * freshly read data (e.g. an old, valid-looking header); a real read
	 * simply overwrites this byte again.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * -EUCLEAN: the data were corrected by ECC, so they
			 * are valid - report bit-flips so that the caller can
			 * schedule this PEB for scrubbing.
			 */
			ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
				pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * An ECC error is only meaningful when the driver has read
		 * the whole requested region - a short read combined with
		 * -EBADMSG indicates a driver bug, so turn it into a plain
		 * I/O error (and complain loudly in debug builds).
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		/* Debugging hook: pretend a bit-flip happened */
		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset
 * @offset of physical eraseblock @pnum. Both @offset and @len must be
 * aligned to @ubi->hdrs_min_io_size. Returns zero in case of success and a
 * negative error code in case of failure. When I/O self-checks are enabled
 * the written data are read back and verified, and the rest of the PEB is
 * checked to still contain only 0xFF bytes.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are going to write to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We are writing to the data area of the PEB - make sure it
		 * already carries valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	/* Debugging hook: emulate a write failure */
	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * PEBs are written sequentially, so everything behind the
		 * region we have just written must still be erased (0xFF).
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
306
307
308
309
310
311
312
313
314
315
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum: it submits
 * the erase request to MTD, waits until the operation is reported done or
 * failed, and retries up to %UBI_IO_RETRIES times on error. After a
 * successful erase the PEB is optionally verified to contain only 0xFF
 * bytes. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

retry:
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd = ubi->mtd;
	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;
	/* The wait queue is handed to MTD so completion can wake us up */
	ei.priv = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	/* Block until MTD signals the final state of the erase operation */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err(ubi, "interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn(ubi, "error while erasing PEB %d, retry",
				 pnum);
			yield();
			goto retry;
		}
		ubi_err(ubi, "cannot erase PEB %d", pnum);
		dump_stack();
		return -EIO;
	}

	/* When self-checks are enabled, verify the PEB really is all 0xFF */
	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	/* Debugging hook: emulate an erase failure */
	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}
382
383
/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
385
386
387
388
389
390
391
392
393
394
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * For each entry of @patterns this function erases the PEB, checks that it
 * reads back as all 0xFF, fills it with the pattern and verifies that the
 * pattern reads back intact. Returns the number of patterns written (a
 * positive number) if the PEB survived the test, %-EIO if it should be
 * marked as bad, and another negative error code in case of other
 * failures. Holds @ubi->buf_mutex because @ubi->peb_buf is used as scratch
 * space.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg(ubi, "run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes after erasure */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write the test pattern and verify it reads back intact */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/*
		 * Fill the buffer with the inverted pattern first, so a read
		 * that silently leaves the buffer untouched cannot pass the
		 * check below.
		 */
		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err(ubi, "pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	err = patt_count;
	ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * Bit-flips or ECC errors on a freshly erased and written
		 * PEB mean the eraseblock is unreliable - report it as bad.
		 */
		ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
/**
 * nor_erase_prepare - invalidate the UBI headers of a NOR PEB before erase.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * This function overwrites the first 4 bytes of the EC header and of the
 * VID header of PEB @pnum with zeros, destroying their magic numbers, so
 * that an erase operation interrupted mid-way cannot leave behind headers
 * which still look valid. A header which is already unreadable
 * (%UBI_IO_BAD_HDR, %UBI_IO_BAD_HDR_EBADMSG or %UBI_IO_FF) is left alone.
 * Returns zero in case of success and %-EIO in case of failure.
 */
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	struct ubi_ec_hdr ec_hdr;

	/*
	 * NOTE(review): on NOR flash bits can only be cleared by writing, so
	 * writing 4 zero bytes over a header's magic is enough to make the
	 * subsequent magic check fail - presumably why only 4 bytes are
	 * written here.
	 */
	struct ubi_vid_hdr vid_hdr;

	/* Only clobber the EC header if it currently reads as valid/bad-CRC */
	addr = (loff_t)pnum * ubi->peb_size;
	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF){
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if(err)
			goto error;
	}

	/* Same for the VID header, located at @ubi->vid_hdr_aloffset */
	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
	    err != UBI_IO_FF){
		addr += ubi->vid_hdr_aloffset;
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (err)
			goto error;
	}
	return 0;

error:
	/*
	 * The PEB could not be invalidated; dump its contents for
	 * post-mortem analysis and report an I/O error.
	 */
	ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function erases physical eraseblock @pnum. On NOR flash the UBI
 * headers are invalidated first; when @torture is non-zero the PEB is run
 * through the torture test before the final erase. Returns the number of
 * erase operations performed (a positive number) in case of success, and a
 * negative error code in case of failure. %-EIO means the PEB should be
 * marked as bad.
 */
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_not_bad(ubi, pnum);
	if (err != 0)
		return err;

	if (ubi->ro_mode) {
		ubi_err(ubi, "read-only mode");
		return -EROFS;
	}

	if (ubi->nor_flash) {
		err = nor_erase_prepare(ubi, pnum);
		if (err)
			return err;
	}

	if (torture) {
		/* torture_peb() returns the number of erasures it performed */
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	/* Total erasures: those done by the torture test plus the final one */
	return ret + 1;
}
584
585
586
587
588
589
590
591
592
593int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
594{
595 struct mtd_info *mtd = ubi->mtd;
596
597 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
598
599 if (ubi->bad_allowed) {
600 int ret;
601
602 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
603 if (ret < 0)
604 ubi_err(ubi, "error %d while checking if PEB %d is bad",
605 ret, pnum);
606 else if (ret)
607 dbg_io("PEB %d is bad", pnum);
608 return ret;
609 }
610
611 return 0;
612}
613
614
615
616
617
618
619
620
621
622int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
623{
624 int err;
625 struct mtd_info *mtd = ubi->mtd;
626
627 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
628
629 if (ubi->ro_mode) {
630 ubi_err(ubi, "read-only mode");
631 return -EROFS;
632 }
633
634 if (!ubi->bad_allowed)
635 return 0;
636
637 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
638 if (err)
639 ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
640 return err;
641}
642
643
644
645
646
647
648
649
650
651static int validate_ec_hdr(const struct ubi_device *ubi,
652 const struct ubi_ec_hdr *ec_hdr)
653{
654 long long ec;
655 int vid_hdr_offset, leb_start;
656
657 ec = be64_to_cpu(ec_hdr->ec);
658 vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
659 leb_start = be32_to_cpu(ec_hdr->data_offset);
660
661 if (ec_hdr->version != UBI_VERSION) {
662 ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
663 UBI_VERSION, (int)ec_hdr->version);
664 goto bad;
665 }
666
667 if (vid_hdr_offset != ubi->vid_hdr_offset) {
668 ubi_err(ubi, "bad VID header offset %d, expected %d",
669 vid_hdr_offset, ubi->vid_hdr_offset);
670 goto bad;
671 }
672
673 if (leb_start != ubi->leb_start) {
674 ubi_err(ubi, "bad data offset %d, expected %d",
675 leb_start, ubi->leb_start);
676 goto bad;
677 }
678
679 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
680 ubi_err(ubi, "bad erase counter %lld", ec);
681 goto bad;
682 }
683
684 return 0;
685
686bad:
687 ubi_err(ubi, "bad EC header");
688 ubi_dump_ec_hdr(ec_hdr);
689 dump_stack();
690 return 1;
691}
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the erase counter header from physical eraseblock
 * @pnum and stores it in @ec_hdr. Return codes:
 * o %0 - the header was read and is valid;
 * o %UBI_IO_BITFLIPS - the same, but bit-flips were corrected;
 * o %UBI_IO_FF / %UBI_IO_FF_BITFLIPS - the area contains only 0xFF bytes
 *   (the PEB is presumably empty);
 * o %UBI_IO_BAD_HDR - the magic number or CRC is wrong;
 * o %UBI_IO_BAD_HDR_EBADMSG - the same, and the read itself was unreliable
 *   (uncorrectable ECC error);
 * o %-EINVAL - the CRC is fine but the contents fail validation;
 * o other negative error codes in case of read failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported an uncorrectable ECC error. In
		 * the latter case the buffer may still contain a usable
		 * header, so keep going and let the magic/CRC checks below
		 * decide; @read_err is folded into the return value later.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		/* A wrong magic after an ECC error: data are untrustworthy */
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic field is wrong. Check whether the whole header
		 * area is simply erased (all 0xFF), which means the PEB is
		 * empty rather than corrupted.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */
			if (verbose)
				ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * This is not a valid erase counter header, and these are
		 * not 0xFF bytes - report a corrupted header.
		 */
		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);

		/* Distinguish a plainly corrupted header from a bad read */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* The CRC is OK, now validate the header contents */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * If a bit-flip (or ECC error masked by a good CRC) was observed,
	 * tell the caller so the PEB can be scheduled for scrubbing.
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function fills in the constant fields of @ec_hdr (the caller is
 * expected to have set @ec_hdr->ec), computes the header CRC and writes the
 * header to the beginning of physical eraseblock @pnum. Returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t crc;

	dbg_io("write EC header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
	ec_hdr->version = UBI_VERSION;
	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	ec_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);
	if (err)
		return err;

	/* Debugging hook: emulate a power cut during the EC header write */
	if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
		return -EROFS;

	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
	return err;
}
851
852
853
854
855
856
857
858
859
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks that the fields of @vid_hdr are internally
 * consistent and consistent with this UBI device. Returns zero if the
 * header is OK and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err(ubi, "bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err(ubi, "negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err(ubi, "bad vol_id");
		goto bad;
	}

	/* Only internal volumes may carry a non-zero compatibility flag */
	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err(ubi, "bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err(ubi, "bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err(ubi, "bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Static volumes: every mapped LEB carries data, so
		 * @used_ebs and @data_size must be non-zero, all LEBs except
		 * the last must be completely full, and @lnum must lie
		 * within the volume.
		 */
		if (used_ebs == 0) {
			ubi_err(ubi, "zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err(ubi, "zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err(ubi, "bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				ubi_err(ubi, "bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err(ubi, "too high lnum");
			goto bad;
		}
	} else {
		/*
		 * Dynamic volumes: @data_size/@data_crc are only meaningful
		 * for LEBs written by atomic LEB change (@copy_flag set);
		 * ordinary dynamic LEBs must have both fields zero, and
		 * @used_ebs is always zero.
		 */
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err(ubi, "non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err(ubi, "non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err(ubi, "zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err(ubi, "bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err(ubi, "bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @vid_hdr: &struct ubi_vid_hdr object where to store the read header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the volume identifier header from physical
 * eraseblock @pnum and stores it in @vid_hdr. The same error codes as for
 * ubi_io_read_ec_hdr() are returned: %0, %UBI_IO_BITFLIPS, %UBI_IO_FF,
 * %UBI_IO_FF_BITFLIPS, %UBI_IO_BAD_HDR, %UBI_IO_BAD_HDR_EBADMSG, %-EINVAL
 * or another negative error code.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_hdr *vid_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	void *p;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/*
	 * The VID header sits at @ubi->vid_hdr_shift bytes into the aligned
	 * region read from flash, so back up the pointer accordingly.
	 */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		/* A wrong magic after an ECC error: data are untrustworthy */
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/* All 0xFF means no VID header was ever written */
		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
					 pnum);
			dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
				pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
				 pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
			pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
				 pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
			pnum, crc, hdr_crc);
		/* Distinguish a plainly corrupted header from a bad read */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* The CRC is OK, now validate the header contents */
	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err(ubi, "validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/* Report bit-flips so the caller can schedule scrubbing */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
/**
 * ubi_io_write_vid_hdr - write a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to write to
 * @vid_hdr: the volume identifier header to write
 *
 * This function fills in the magic and version fields of @vid_hdr (the
 * caller provides the remaining fields), computes the header CRC and
 * writes the header to physical eraseblock @pnum. The PEB is expected to
 * already carry a valid EC header (checked when I/O self-checks are
 * enabled). Returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
			 struct ubi_vid_hdr *vid_hdr)
{
	int err;
	uint32_t crc;
	void *p;

	dbg_io("write VID header to PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = self_check_peb_ec_hdr(ubi, pnum);
	if (err)
		return err;

	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
	vid_hdr->version = UBI_VERSION;
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	vid_hdr->hdr_crc = cpu_to_be32(crc);

	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
	if (err)
		return err;

	/* Debugging hook: emulate a power cut during the VID header write */
	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
		return -EROFS;

	/* The write covers the whole aligned region around the header */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
			   ubi->vid_hdr_alsize);
	return err;
}
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1113{
1114 int err;
1115
1116 if (!ubi_dbg_chk_io(ubi))
1117 return 0;
1118
1119 err = ubi_io_is_bad(ubi, pnum);
1120 if (!err)
1121 return err;
1122
1123 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1124 dump_stack();
1125 return err > 0 ? -EINVAL : err;
1126}
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
/**
 * self_check_ec_hdr - check whether an erase counter header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the header belongs to
 * @ec_hdr: the erase counter header to check
 *
 * This function checks the magic number and validates the contents of
 * @ec_hdr. Returns zero if the header is OK (or if I/O self-checks are
 * disabled) and %-EINVAL if not.
 */
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
			     const struct ubi_ec_hdr *ec_hdr)
{
	int err;
	uint32_t magic;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		ubi_err(ubi, "bad magic %#08x, must be %#08x",
			magic, UBI_EC_HDR_MAGIC);
		goto fail;
	}

	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		goto fail;
	}

	return 0;

fail:
	ubi_dump_ec_hdr(ec_hdr);
	dump_stack();
	return -EINVAL;
}
1166
1167
1168
1169
1170
1171
1172
1173
1174
/**
 * self_check_peb_ec_hdr - check the erase counter header of a physical
 * eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function reads the EC header from PEB @pnum, verifies its CRC and
 * validates its contents. Returns zero if the header is all right (or if
 * I/O self-checks are disabled), and %-EINVAL or another negative error
 * code if not.
 */
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	/* Bit-flips and ECC errors still yield usable data - keep checking */
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
			crc, hdr_crc);
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_dump_ec_hdr(ec_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
/**
 * self_check_vid_hdr - check whether a volume identifier header is all
 * right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the header belongs to
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks the magic number and validates the contents of
 * @vid_hdr. Returns zero if the header is OK (or if I/O self-checks are
 * disabled) and %-EINVAL if not.
 */
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
			      const struct ubi_vid_hdr *vid_hdr)
{
	int err;
	uint32_t magic;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
			magic, pnum, UBI_VID_HDR_MAGIC);
		goto fail;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		goto fail;
	}

	return err;

fail:
	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return -EINVAL;

}
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1261{
1262 int err;
1263 uint32_t crc, hdr_crc;
1264 struct ubi_vid_hdr *vid_hdr;
1265 void *p;
1266
1267 if (!ubi_dbg_chk_io(ubi))
1268 return 0;
1269
1270 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1271 if (!vid_hdr)
1272 return -ENOMEM;
1273
1274 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1275 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1276 ubi->vid_hdr_alsize);
1277 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1278 goto exit;
1279
1280 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1281 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1282 if (hdr_crc != crc) {
1283 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1284 pnum, crc, hdr_crc);
1285 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1286 ubi_dump_vid_hdr(vid_hdr);
1287 dump_stack();
1288 err = -EINVAL;
1289 goto exit;
1290 }
1291
1292 err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1293
1294exit:
1295 ubi_free_vid_hdr(ubi, vid_hdr);
1296 return err;
1297}
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1312 int offset, int len)
1313{
1314 int err, i;
1315 size_t read;
1316 void *buf1;
1317 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1318
1319 if (!ubi_dbg_chk_io(ubi))
1320 return 0;
1321
1322 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1323 if (!buf1) {
1324 ubi_err(ubi, "cannot allocate memory to check writes");
1325 return 0;
1326 }
1327
1328 err = mtd_read(ubi->mtd, addr, len, &read, buf1);
1329 if (err && !mtd_is_bitflip(err))
1330 goto out_free;
1331
1332 for (i = 0; i < len; i++) {
1333 uint8_t c = ((uint8_t *)buf)[i];
1334 uint8_t c1 = ((uint8_t *)buf1)[i];
1335#if !defined(CONFIG_UBI_SILENCE_MSG)
1336 int dump_len = max_t(int, 128, len - i);
1337#endif
1338
1339 if (c == c1)
1340 continue;
1341
1342 ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
1343 pnum, offset, len);
1344#if !defined(CONFIG_UBI_SILENCE_MSG)
1345 ubi_msg(ubi, "data differ at position %d", i);
1346 ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
1347 i, i + dump_len);
1348 print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
1349 buf + i, dump_len, 1);
1350 ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
1351 i, i + dump_len);
1352 print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
1353 buf1 + i, dump_len, 1);
1354#endif
1355 dump_stack();
1356 err = -EINVAL;
1357 goto out_free;
1358 }
1359
1360 vfree(buf1);
1361 return 0;
1362
1363out_free:
1364 vfree(buf1);
1365 return err;
1366}
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
/**
 * ubi_self_check_all_ff - check that a region of flash is empty.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @offset: the starting offset within the physical eraseblock
 * @len: the length of the region to check
 *
 * This function returns zero if only 0xFF bytes are present at offset
 * @offset of physical eraseblock @pnum, and %-EINVAL or another negative
 * error code if not. The check is skipped (returning zero) when I/O
 * self-checks are disabled or when the scratch buffer cannot be allocated.
 */
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
	size_t read;
	int err;
	void *buf;
	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

	if (!ubi_dbg_chk_io(ubi))
		return 0;

	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
		/* Cannot verify, but do not fail the caller because of that */
		return 0;
	}

	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	/* Bit-flips still yield usable data - only real errors abort */
	if (err && !mtd_is_bitflip(err)) {
		ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, len, pnum, offset, read);
		goto error;
	}

	err = ubi_check_pattern(buf, 0xFF, len);
	if (err == 0) {
		ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
			pnum, offset, len);
		goto fail;
	}

	vfree(buf);
	return 0;

fail:
	ubi_err(ubi, "self-check failed for PEB %d", pnum);
	ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
	print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
	err = -EINVAL;
error:
	dump_stack();
	vfree(buf);
	return err;
}
1422