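/*
 * UBI input/output sub-system.
 *
 * This unit provides a uniform way to work with the underlying MTD device
 * and implements helpers for reading, writing and erasing physical
 * eraseblocks (PEBs) as well as for reading and writing the UBI erase
 * counter (EC) and volume identifier (VID) headers.
 *
 * The code deliberately does not trust what it reads back from the flash
 * media: every header is CRC-checked and validated, and optional I/O
 * self-checks (enabled via ubi_dbg_chk_io()) re-read and verify data that
 * has just been written or erased.
 */
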
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "ubi.h"

static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
                             const struct ubi_ec_hdr *ec_hdr);
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
                              const struct ubi_vid_hdr *vid_hdr);
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
                            int offset, int len);
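
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock @pnum
 * and stores the read data in the @buf buffer. Returns zero in case of
 * success, %UBI_IO_BITFLIPS if all the requested data were read but
 * bit-flips were detected (or emulated for debugging), and a negative error
 * code in case of failure.
 */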
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
                int len)
{
        int err, retries = 0;
        size_t read;
        loff_t addr;

        dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
        ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
        ubi_assert(len > 0);

        err = self_check_not_bad(ubi, pnum);
        if (err)
                return err;

        /*
         * Deliberately corrupt the first byte of the buffer before reading.
         * If the MTD driver fails to fill the buffer but still reports
         * success (or a correctable error), stale data from a previous
         * operation could otherwise be mistaken for freshly read data.
         */
        *((uint8_t *)buf) ^= 0xFF;

        addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
        err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err) {
                const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

                if (mtd_is_bitflip(err)) {
                        /*
                         * -EUCLEAN means a bit-flip was detected and
                         * corrected by the driver, so the data is valid and
                         * the PEB only needs to be scrubbed later.
                         */
                        ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
                                pnum);
                        ubi_assert(len == read);
                        return UBI_IO_BITFLIPS;
                }

                if (retries++ < UBI_IO_RETRIES) {
                        ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
                                 err, errstr, len, pnum, offset, read);
                        yield();
                        goto retry;
                }

                ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
                        err, errstr, len, pnum, offset, read);
                dump_stack();

                /*
                 * A driver should never report a data integrity error
                 * without having read all the requested data, but buggy
                 * drivers may do so; turn such results into a plain -EIO.
                 */
                if (read != len && mtd_is_eccerr(err)) {
                        ubi_assert(0);
                        err = -EIO;
                }
        } else {
                ubi_assert(len == read);

                if (ubi_dbg_is_bitflip(ubi)) {
                        dbg_gen("bit-flip (emulated)");
                        err = UBI_IO_BITFLIPS;
                }
        }

        return err;
}
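
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. @offset and @len have to be aligned to
 * @ubi->hdrs_min_io_size. Returns zero in case of success and a negative
 * error code in case of failure; on a write error the caller will most
 * probably want to erase (and possibly torture) the eraseblock.
 */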
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
                 int len)
{
        int err;
        size_t written;
        loff_t addr;

        dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
        ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
        ubi_assert(offset % ubi->hdrs_min_io_size == 0);
        ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

        if (ubi->ro_mode) {
                ubi_err(ubi, "read-only mode");
                return -EROFS;
        }

        err = self_check_not_bad(ubi, pnum);
        if (err)
                return err;

        /* The area we are writing to has to contain all 0xFF bytes */
        err = ubi_self_check_all_ff(ubi, pnum, offset, len);
        if (err)
                return err;

        if (offset >= ubi->leb_start) {
                /*
                 * We are writing to the data area of the PEB, so make sure
                 * it already carries valid EC and VID headers.
                 */
                err = self_check_peb_ec_hdr(ubi, pnum);
                if (err)
                        return err;
                err = self_check_peb_vid_hdr(ubi, pnum);
                if (err)
                        return err;
        }

        if (ubi_dbg_is_write_failure(ubi)) {
                ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
                        len, pnum, offset);
                dump_stack();
                return -EIO;
        }

        addr = (loff_t)pnum * ubi->peb_size + offset;
        err = mtd_write(ubi->mtd, addr, len, &written, buf);
        if (err) {
                ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
                        err, len, pnum, offset, written);
                dump_stack();
                ubi_dump_flash(ubi, pnum, offset, len);
        } else
                ubi_assert(written == len);

        if (!err) {
                err = self_check_write(ubi, buf, pnum, offset, len);
                if (err)
                        return err;

                /*
                 * Make sure the region after the one we have just written
                 * still contains only 0xFF bytes.
                 */
                offset += len;
                len = ubi->peb_size - offset;
                if (len)
                        err = ubi_self_check_all_ff(ubi, pnum, offset, len);
        }

        return err;
}

/**
 * erase_callback - MTD erasure call-back.
 * @ei: MTD erase information object.
 *
 * The call-back simply wakes up the task sleeping in do_sync_erase().
 */
static void erase_callback(struct erase_info *ei)
{
        wake_up_interruptible((wait_queue_head_t *)ei->priv);
}
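
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum, verifying
 * afterwards that it contains only 0xFF bytes. Returns zero in case of
 * success, %-EINTR if the erasure was interrupted by a signal, and other
 * negative error codes in case of failure.
 */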
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
        int err, retries = 0;
        struct erase_info ei;
        wait_queue_head_t wq;

        dbg_io("erase PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        if (ubi->ro_mode) {
                ubi_err(ubi, "read-only mode");
                return -EROFS;
        }

retry:
        init_waitqueue_head(&wq);
        memset(&ei, 0, sizeof(struct erase_info));

        ei.mtd = ubi->mtd;
        ei.addr = (loff_t)pnum * ubi->peb_size;
        ei.len = ubi->peb_size;
        ei.callback = erase_callback;
        ei.priv = (unsigned long)&wq;

        err = mtd_erase(ubi->mtd, &ei);
        if (err) {
                if (retries++ < UBI_IO_RETRIES) {
                        ubi_warn(ubi, "error %d while erasing PEB %d, retry",
                                 err, pnum);
                        yield();
                        goto retry;
                }
                ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
                dump_stack();
                return err;
        }

        err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
                                           ei.state == MTD_ERASE_FAILED);
        if (err) {
                ubi_err(ubi, "interrupted PEB %d erasure", pnum);
                return -EINTR;
        }

        if (ei.state == MTD_ERASE_FAILED) {
                if (retries++ < UBI_IO_RETRIES) {
                        ubi_warn(ubi, "error while erasing PEB %d, retry",
                                 pnum);
                        yield();
                        goto retry;
                }
                ubi_err(ubi, "cannot erase PEB %d", pnum);
                dump_stack();
                return -EIO;
        }

        err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
        if (err)
                return err;

        if (ubi_dbg_is_erase_failure(ubi)) {
                ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
                return -EIO;
        }

        return 0;
}

/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
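
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * The eraseblock is repeatedly erased, filled with a test pattern and read
 * back. Returns the number of patterns that were checked if the physical
 * eraseblock turned out to be good, %-EIO if it is bad, and other negative
 * error codes in case of other errors.
 */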
static int torture_peb(struct ubi_device *ubi, int pnum)
{
        int err, i, patt_count;

        ubi_msg(ubi, "run torture test for PEB %d", pnum);
        patt_count = ARRAY_SIZE(patterns);
        ubi_assert(patt_count > 0);

        mutex_lock(&ubi->buf_mutex);
        for (i = 0; i < patt_count; i++) {
                err = do_sync_erase(ubi, pnum);
                if (err)
                        goto out;

                /* Make sure the PEB contains only 0xFF bytes after erasure */
                err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;

                err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
                if (err == 0) {
                        ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
                                pnum);
                        err = -EIO;
                        goto out;
                }

                /* Write a pattern and check it */
                memset(ubi->peb_buf, patterns[i], ubi->peb_size);
                err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;

                memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
                err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                if (err)
                        goto out;

                err = ubi_check_pattern(ubi->peb_buf, patterns[i],
                                        ubi->peb_size);
                if (err == 0) {
                        ubi_err(ubi, "pattern %x checking failed for PEB %d",
                                patterns[i], pnum);
                        err = -EIO;
                        goto out;
                }
        }

        err = patt_count;
        ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);

out:
        mutex_unlock(&ubi->buf_mutex);
        if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
                /*
                 * If a bit-flip or data integrity error was detected while
                 * reading this supposedly freshly erased eraseblock, treat
                 * the eraseblock as bad.
                 */
                ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
                        pnum);
                err = -EIO;
        }
        return err;
}
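
/**
 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * NOR flash erasure is not atomic: if it is interrupted (for example by a
 * power cut), the eraseblock may still contain headers that look perfectly
 * valid. To avoid such a half-erased eraseblock being treated as intact
 * later, this function invalidates the EC and VID headers (by zeroing their
 * magic numbers) before the actual erasure starts. Returns zero in case of
 * success and a negative error code in case of failure.
 */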
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
        int err;
        size_t written;
        loff_t addr;
        uint32_t data = 0;
        struct ubi_ec_hdr ec_hdr;
        struct ubi_vid_io_buf vidb;
        /*
         * VID header buffers are normally allocated dynamically, but this is
         * a NOR-specific path, so an on-stack header is acceptable here.
         */
        struct ubi_vid_hdr vid_hdr;

        /*
         * There is no need to invalidate a header which is already known to
         * be corrupted or empty (all 0xFF), so check each header first.
         */
        addr = (loff_t)pnum * ubi->peb_size;
        err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
        if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
            err != UBI_IO_FF) {
                err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
                if (err)
                        goto error;
        }

        ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
        ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));

        err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
        if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
            err != UBI_IO_FF) {
                addr += ubi->vid_hdr_aloffset;
                err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
                if (err)
                        goto error;
        }
        return 0;

error:
        /*
         * The PEB still contains a header which we failed to invalidate;
         * the flash media or the driver is probably in a bad state, so
         * report the problem and return an error.
         */
        ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
        ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
        return -EIO;
}
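
/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function synchronously erases physical eraseblock @pnum. If @torture
 * is not zero, the eraseblock is additionally checked by writing test
 * patterns to it and reading them back. Returns the number of erase
 * operations performed in case of success, %-EIO if the erasure or the
 * torture test failed, %-EROFS in read-only mode, and other negative error
 * codes in case of other errors.
 */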
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
        int err, ret = 0;

        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        err = self_check_not_bad(ubi, pnum);
        if (err != 0)
                return err;

        if (ubi->ro_mode) {
                ubi_err(ubi, "read-only mode");
                return -EROFS;
        }

        if (ubi->nor_flash) {
                err = nor_erase_prepare(ubi, pnum);
                if (err)
                        return err;
        }

        if (torture) {
                ret = torture_peb(ubi, pnum);
                if (ret < 0)
                        return ret;
        }

        err = do_sync_erase(ubi, pnum);
        if (err)
                return err;

        return ret + 1;
}
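
/**
 * ubi_io_is_bad - check if a physical eraseblock is bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns a positive number if the physical eraseblock is bad,
 * zero if not, and a negative error code if the check failed. For flashes
 * which do not have bad eraseblocks (@ubi->bad_allowed is zero) it always
 * returns zero.
 */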
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
        struct mtd_info *mtd = ubi->mtd;

        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        if (ubi->bad_allowed) {
                int ret;

                ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
                if (ret < 0)
                        ubi_err(ubi, "error %d while checking if PEB %d is bad",
                                ret, pnum);
                else if (ret)
                        dbg_io("PEB %d is bad", pnum);
                return ret;
        }

        return 0;
}
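
/**
 * ubi_io_mark_bad - mark a physical eraseblock as bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to mark
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. It is a no-op if the flash does not support bad
 * eraseblock marking (@ubi->bad_allowed is zero).
 */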
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
        int err;
        struct mtd_info *mtd = ubi->mtd;

        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        if (ubi->ro_mode) {
                ubi_err(ubi, "read-only mode");
                return -EROFS;
        }

        if (!ubi->bad_allowed)
                return 0;

        err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
        if (err)
                ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
        return err;
}

/**
 * validate_ec_hdr - validate an erase counter header.
 * @ubi: UBI device description object
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header is OK and %1 if it
 * is not.
 */
static int validate_ec_hdr(const struct ubi_device *ubi,
                           const struct ubi_ec_hdr *ec_hdr)
{
        long long ec;
        int vid_hdr_offset, leb_start;

        ec = be64_to_cpu(ec_hdr->ec);
        vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
        leb_start = be32_to_cpu(ec_hdr->data_offset);

        if (ec_hdr->version != UBI_VERSION) {
                ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
                        UBI_VERSION, (int)ec_hdr->version);
                goto bad;
        }

        if (vid_hdr_offset != ubi->vid_hdr_offset) {
                ubi_err(ubi, "bad VID header offset %d, expected %d",
                        vid_hdr_offset, ubi->vid_hdr_offset);
                goto bad;
        }

        if (leb_start != ubi->leb_start) {
                ubi_err(ubi, "bad data offset %d, expected %d",
                        leb_start, ubi->leb_start);
                goto bad;
        }

        if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
                ubi_err(ubi, "bad erase counter %lld", ec);
                goto bad;
        }

        return 0;

bad:
        ubi_err(ubi, "bad EC header");
        ubi_dump_ec_hdr(ec_hdr);
        dump_stack();
        return 1;
}
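
/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the erase counter header from physical eraseblock
 * @pnum, checks its CRC and validates its contents. Possible return values:
 *   o %0 if the header is OK;
 *   o %UBI_IO_BITFLIPS if the header is OK but bit-flips were corrected
 *     while reading it;
 *   o %UBI_IO_BAD_HDR if the header has a bad magic number or a bad CRC;
 *   o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but a data
 *     integrity (ECC) error was also reported while reading;
 *   o %UBI_IO_FF if only 0xFF bytes were found (the PEB looks empty);
 *   o %UBI_IO_FF_BITFLIPS is the same as %UBI_IO_FF, but bit-flips were
 *     detected as well;
 *   o a negative error code in case of failure.
 */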
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
                       struct ubi_ec_hdr *ec_hdr, int verbose)
{
        int err, read_err;
        uint32_t crc, magic, hdr_crc;

        dbg_io("read EC header from PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
        if (read_err) {
                if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
                        return read_err;

                /*
                 * All the data was read, but either a correctable bit-flip
                 * occurred or MTD reported a data integrity (ECC) error.
                 * The header CRC below will catch real corruption; if the
                 * header turns out to be fine, the bit-flip is reported to
                 * the caller so that the PEB gets scrubbed.
                 */
        }

        magic = be32_to_cpu(ec_hdr->magic);
        if (magic != UBI_EC_HDR_MAGIC) {
                if (mtd_is_eccerr(read_err))
                        return UBI_IO_BAD_HDR_EBADMSG;

                /*
                 * The magic field is wrong. Distinguish an empty PEB (all
                 * 0xFF bytes) from a really corrupted header.
                 */
                if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
                        /* The physical eraseblock is supposedly empty */
                        if (verbose)
                                ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
                                         pnum);
                        dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
                                pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
                                return UBI_IO_FF_BITFLIPS;
                }

                /*
                 * This is not a valid erase counter header and the bytes are
                 * not all 0xFF, so the header is corrupted.
                 */
                if (verbose) {
                        ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
                                 pnum, magic, UBI_EC_HDR_MAGIC);
                        ubi_dump_ec_hdr(ec_hdr);
                }
                dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
                        pnum, magic, UBI_EC_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }

        crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

        if (hdr_crc != crc) {
                if (verbose) {
                        ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
                                 pnum, crc, hdr_crc);
                        ubi_dump_ec_hdr(ec_hdr);
                }
                dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
                        pnum, crc, hdr_crc);

                if (!read_err)
                        return UBI_IO_BAD_HDR;
                else
                        return UBI_IO_BAD_HDR_EBADMSG;
        }

        /* And of course validate what has just been read from the media */
        err = validate_ec_hdr(ubi, ec_hdr);
        if (err) {
                ubi_err(ubi, "validation failed for PEB %d", pnum);
                return -EINVAL;
        }

        /*
         * The magic and CRC are fine; if a bit-flip occurred while reading,
         * report it so that the caller can schedule scrubbing.
         */
        return read_err ? UBI_IO_BITFLIPS : 0;
}
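
/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function fills in the magic, version, offsets, image sequence number
 * and CRC fields of @ec_hdr (the caller is expected to have set the erase
 * counter itself) and writes the header to the beginning of physical
 * eraseblock @pnum. Returns zero in case of success and a negative error
 * code in case of failure.
 */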
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
                        struct ubi_ec_hdr *ec_hdr)
{
        int err;
        uint32_t crc;

        dbg_io("write EC header to PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
        ec_hdr->version = UBI_VERSION;
        ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
        ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
        ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
        crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
        ec_hdr->hdr_crc = cpu_to_be32(crc);

        err = self_check_ec_hdr(ubi, pnum, ec_hdr);
        if (err)
                return err;

        if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
                return -EROFS;

        err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
        return err;
}

/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks the data stored in the volume identifier header
 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
                            const struct ubi_vid_hdr *vid_hdr)
{
        int vol_type = vid_hdr->vol_type;
        int copy_flag = vid_hdr->copy_flag;
        int vol_id = be32_to_cpu(vid_hdr->vol_id);
        int lnum = be32_to_cpu(vid_hdr->lnum);
        int compat = vid_hdr->compat;
        int data_size = be32_to_cpu(vid_hdr->data_size);
        int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
        int data_pad = be32_to_cpu(vid_hdr->data_pad);
        int data_crc = be32_to_cpu(vid_hdr->data_crc);
        int usable_leb_size = ubi->leb_size - data_pad;

        if (copy_flag != 0 && copy_flag != 1) {
                ubi_err(ubi, "bad copy_flag");
                goto bad;
        }

        if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
            data_pad < 0) {
                ubi_err(ubi, "negative values");
                goto bad;
        }

        if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
                ubi_err(ubi, "bad vol_id");
                goto bad;
        }

        if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
                ubi_err(ubi, "bad compat");
                goto bad;
        }

        if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
            compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
            compat != UBI_COMPAT_REJECT) {
                ubi_err(ubi, "bad compat");
                goto bad;
        }

        if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
                ubi_err(ubi, "bad vol_type");
                goto bad;
        }

        if (data_pad >= ubi->leb_size / 2) {
                ubi_err(ubi, "bad data_pad");
                goto bad;
        }

        if (data_size > ubi->leb_size) {
                ubi_err(ubi, "bad data_size");
                goto bad;
        }

        if (vol_type == UBI_VID_STATIC) {
                /*
                 * For static volumes used_ebs and data_size must be
                 * non-zero: an empty static volume simply has no VID headers
                 * written at all, so a written header always describes some
                 * data.
                 */
                if (used_ebs == 0) {
                        ubi_err(ubi, "zero used_ebs");
                        goto bad;
                }
                if (data_size == 0) {
                        ubi_err(ubi, "zero data_size");
                        goto bad;
                }
                if (lnum < used_ebs - 1) {
                        if (data_size != usable_leb_size) {
                                ubi_err(ubi, "bad data_size");
                                goto bad;
                        }
                } else if (lnum == used_ebs - 1) {
                        if (data_size == 0) {
                                ubi_err(ubi, "bad data_size at last LEB");
                                goto bad;
                        }
                } else {
                        ubi_err(ubi, "too high lnum");
                        goto bad;
                }
        } else {
                if (copy_flag == 0) {
                        if (data_crc != 0) {
                                ubi_err(ubi, "non-zero data CRC");
                                goto bad;
                        }
                        if (data_size != 0) {
                                ubi_err(ubi, "non-zero data_size");
                                goto bad;
                        }
                } else {
                        if (data_size == 0) {
                                ubi_err(ubi, "zero data_size of copy");
                                goto bad;
                        }
                }
                if (used_ebs != 0) {
                        ubi_err(ubi, "bad used_ebs");
                        goto bad;
                }
        }

        return 0;

bad:
        ubi_err(ubi, "bad VID header");
        ubi_dump_vid_hdr(vid_hdr);
        dump_stack();
        return 1;
}
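
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vidb: the volume identifier buffer to store data in
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads the volume identifier header from physical eraseblock
 * @pnum and stores it in @vidb. It also checks the CRC and validates the
 * header contents. The return codes are the same as for
 * ubi_io_read_ec_hdr().
 */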
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
                        struct ubi_vid_io_buf *vidb, int verbose)
{
        int err, read_err;
        uint32_t crc, magic, hdr_crc;
        struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
        void *p = vidb->buffer;

        dbg_io("read VID header from PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
                               ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
        if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
                return read_err;

        magic = be32_to_cpu(vid_hdr->magic);
        if (magic != UBI_VID_HDR_MAGIC) {
                if (mtd_is_eccerr(read_err))
                        return UBI_IO_BAD_HDR_EBADMSG;

                if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
                        if (verbose)
                                ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
                                         pnum);
                        dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
                                pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
                                return UBI_IO_FF_BITFLIPS;
                }

                if (verbose) {
                        ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
                                 pnum, magic, UBI_VID_HDR_MAGIC);
                        ubi_dump_vid_hdr(vid_hdr);
                }
                dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
                        pnum, magic, UBI_VID_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }

        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

        if (hdr_crc != crc) {
                if (verbose) {
                        ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
                                 pnum, crc, hdr_crc);
                        ubi_dump_vid_hdr(vid_hdr);
                }
                dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
                        pnum, crc, hdr_crc);
                if (!read_err)
                        return UBI_IO_BAD_HDR;
                else
                        return UBI_IO_BAD_HDR_EBADMSG;
        }

        err = validate_vid_hdr(ubi, vid_hdr);
        if (err) {
                ubi_err(ubi, "validation failed for PEB %d", pnum);
                return -EINVAL;
        }

        return read_err ? UBI_IO_BITFLIPS : 0;
}
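
/**
 * ubi_io_write_vid_hdr - write a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to write to
 * @vidb: the volume identifier buffer to write
 *
 * This function fills in the magic, version and CRC fields of the header
 * stored in @vidb and writes it to physical eraseblock @pnum, which is
 * expected to already contain a valid EC header. Returns zero in case of
 * success and a negative error code in case of failure.
 */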
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
                         struct ubi_vid_io_buf *vidb)
{
        struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
        int err;
        uint32_t crc;
        void *p = vidb->buffer;

        dbg_io("write VID header to PEB %d", pnum);
        ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

        err = self_check_peb_ec_hdr(ubi, pnum);
        if (err)
                return err;

        vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
        vid_hdr->version = UBI_VERSION;
        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
        vid_hdr->hdr_crc = cpu_to_be32(crc);

        err = self_check_vid_hdr(ubi, pnum, vid_hdr);
        if (err)
                return err;

        if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
                return -EROFS;

        err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
                           ubi->vid_hdr_alsize);
        return err;
}

/**
 * self_check_not_bad - ensure that a physical eraseblock is not bad.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to check
 *
 * This function returns zero if the physical eraseblock is good, %-EINVAL if
 * it is bad and a negative error code if an error occurred. Only performed
 * when I/O self-checks are enabled.
 */
static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
        int err;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        err = ubi_io_is_bad(ubi, pnum);
        if (!err)
                return err;

        ubi_err(ubi, "self-check failed for PEB %d", pnum);
        dump_stack();
        return err > 0 ? -EINVAL : err;
}

/**
 * self_check_ec_hdr - check that an erase counter header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the erase counter header belongs to
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header contains valid
 * values, and %-EINVAL if not. Only performed when I/O self-checks are
 * enabled.
 */
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
                             const struct ubi_ec_hdr *ec_hdr)
{
        int err;
        uint32_t magic;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        magic = be32_to_cpu(ec_hdr->magic);
        if (magic != UBI_EC_HDR_MAGIC) {
                ubi_err(ubi, "bad magic %#08x, must be %#08x",
                        magic, UBI_EC_HDR_MAGIC);
                goto fail;
        }

        err = validate_ec_hdr(ubi, ec_hdr);
        if (err) {
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                goto fail;
        }

        return 0;

fail:
        ubi_dump_ec_hdr(ec_hdr);
        dump_stack();
        return -EINVAL;
}

/**
 * self_check_peb_ec_hdr - check the erase counter header of a PEB.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function reads the erase counter header from @pnum and checks its CRC
 * and contents. Returns zero if the header is all right and a negative error
 * code if not or if an error occurred. Only performed when I/O self-checks
 * are enabled.
 */
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
        int err;
        uint32_t crc, hdr_crc;
        struct ubi_ec_hdr *ec_hdr;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
        if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
                goto exit;

        crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
        if (hdr_crc != crc) {
                ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
                        crc, hdr_crc);
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                ubi_dump_ec_hdr(ec_hdr);
                dump_stack();
                err = -EINVAL;
                goto exit;
        }

        err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
        kfree(ec_hdr);
        return err;
}

/**
 * self_check_vid_hdr - check that a volume identifier header is all right.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number the volume identifier header belongs to
 * @vid_hdr: the volume identifier header to check
 *
 * This function returns zero if the volume identifier header is all right,
 * and %-EINVAL if not. Only performed when I/O self-checks are enabled.
 */
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
                              const struct ubi_vid_hdr *vid_hdr)
{
        int err;
        uint32_t magic;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        magic = be32_to_cpu(vid_hdr->magic);
        if (magic != UBI_VID_HDR_MAGIC) {
                ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
                        magic, pnum, UBI_VID_HDR_MAGIC);
                goto fail;
        }

        err = validate_vid_hdr(ubi, vid_hdr);
        if (err) {
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                goto fail;
        }

        return err;

fail:
        ubi_err(ubi, "self-check failed for PEB %d", pnum);
        ubi_dump_vid_hdr(vid_hdr);
        dump_stack();
        return -EINVAL;
}

/**
 * self_check_peb_vid_hdr - check the volume identifier header of a PEB.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function reads the volume identifier header from @pnum and checks its
 * CRC and contents. Returns zero if the header is all right and a negative
 * error code if not or if an error occurred. Only performed when I/O
 * self-checks are enabled.
 */
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
        int err;
        uint32_t crc, hdr_crc;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        void *p;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        vid_hdr = ubi_get_vid_hdr(vidb);
        p = vidb->buffer;
        err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
                          ubi->vid_hdr_alsize);
        if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
                goto exit;

        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
        if (hdr_crc != crc) {
                ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
                        pnum, crc, hdr_crc);
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                ubi_dump_vid_hdr(vid_hdr);
                dump_stack();
                err = -EINVAL;
                goto exit;
        }

        err = self_check_vid_hdr(ubi, pnum, vid_hdr);

exit:
        ubi_free_vid_buf(vidb);
        return err;
}

/**
 * self_check_write - make sure a write succeeded.
 * @ubi: UBI device description object
 * @buf: buffer with the data which were written
 * @pnum: physical eraseblock number the data were written to
 * @offset: offset within the physical eraseblock the data were written to
 * @len: how many bytes were written
 *
 * This function reads the data back and compares it with what was written.
 * Returns zero if the data are identical and %-EINVAL if not. Only performed
 * when I/O self-checks are enabled.
 */
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
                            int offset, int len)
{
        int err, i;
        size_t read;
        void *buf1;
        loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
        if (!buf1) {
                ubi_err(ubi, "cannot allocate memory to check writes");
                return 0;
        }

        err = mtd_read(ubi->mtd, addr, len, &read, buf1);
        if (err && !mtd_is_bitflip(err))
                goto out_free;

        for (i = 0; i < len; i++) {
                uint8_t c = ((uint8_t *)buf)[i];
                uint8_t c1 = ((uint8_t *)buf1)[i];
                int dump_len;

                if (c == c1)
                        continue;

                ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
                        pnum, offset, len);
                ubi_msg(ubi, "data differ at position %d", i);
                dump_len = max_t(int, 128, len - i);
                ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
                        i, i + dump_len);
                print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                               buf + i, dump_len, 1);
                ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
                        i, i + dump_len);
                print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                               buf1 + i, dump_len, 1);
                dump_stack();
                err = -EINVAL;
                goto out_free;
        }

        vfree(buf1);
        return 0;

out_free:
        vfree(buf1);
        return err;
}

/**
 * ubi_self_check_all_ff - check that a region of flash is empty.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @offset: the starting offset within the physical eraseblock to check
 * @len: the length of the region to check
 *
 * This function returns zero if only 0xFF bytes are present at offset
 * @offset of the physical eraseblock @pnum, and %-EINVAL if not. Only
 * performed when I/O self-checks are enabled.
 */
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
        size_t read;
        int err;
        void *buf;
        loff_t addr = (loff_t)pnum * ubi->peb_size + offset;

        if (!ubi_dbg_chk_io(ubi))
                return 0;

        buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
        if (!buf) {
                ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
                return 0;
        }

        err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err && !mtd_is_bitflip(err)) {
                ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
                        err, len, pnum, offset, read);
                goto error;
        }

        err = ubi_check_pattern(buf, 0xFF, len);
        if (err == 0) {
                ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
                        pnum, offset, len);
                goto fail;
        }

        vfree(buf);
        return 0;

fail:
        ubi_err(ubi, "self-check failed for PEB %d", pnum);
        ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
        err = -EINVAL;
error:
        dump_stack();
        vfree(buf);
        return err;
}