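/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks. It
 * maintains a per-volume table which maps logical eraseblocks (LEBs) to
 * physical eraseblocks (PEBs), serializes access to LEBs via a "lock tree"
 * of per-LEB read/write semaphores, and implements the read, write, unmap,
 * atomic-change and copy operations on LEBs.
 */
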
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved by the EBA sub-system */
#define EBA_RESERVED_PEBS 1

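/**
 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
 * @pnum: the physical eraseblock number attached to the LEB
 *
 * This structure encodes a LEB -> PEB association. Note that the LEB number
 * is not stored here, because it is the index used to access the entries
 * table.
 */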
struct ubi_eba_entry {
	int pnum;
};

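/**
 * struct ubi_eba_table - LEB -> PEB association information
 * @entries: the LEB to PEB mapping (one entry per LEB)
 *
 * This structure is private to the EBA logic and encodes the LEB to PEB
 * association table of a volume.
 */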
struct ubi_eba_table {
	struct ubi_eba_entry *entries;
};

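/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */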
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

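/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */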
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

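/**
 * ubi_eba_get_ldesc - get information about a LEB
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @ldesc: the LEB descriptor to fill
 *
 * Used to query information about a specific LEB. It currently only returns
 * the physical eraseblock the LEB is attached to, or %UBI_LEB_UNMAPPED if
 * the LEB is not mapped.
 */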
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
		       struct ubi_eba_leb_desc *ldesc)
{
	ldesc->lnum = lnum;
	ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}

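/**
 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
 *			  LEBs unmapped
 * @vol: volume containing the EBA table
 * @nentries: number of entries in the table
 *
 * Allocate a new EBA table and initialize it with all LEBs unmapped.
 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
 */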
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
					   int nentries)
{
	struct ubi_eba_table *tbl;
	int err = -ENOMEM;
	int i;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return ERR_PTR(-ENOMEM);

	tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
				     GFP_KERNEL);
	if (!tbl->entries)
		goto err;

	for (i = 0; i < nentries; i++)
		tbl->entries[i].pnum = UBI_LEB_UNMAPPED;

	return tbl;

err:
	kfree(tbl);

	return ERR_PTR(err);
}

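/**
 * ubi_eba_destroy_table - destroy an EBA table
 * @tbl: the table to destroy
 *
 * Destroy an EBA table.
 */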
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->entries);
	kfree(tbl);
}

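/**
 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
 * @vol: volume containing the EBA table to copy
 * @dst: destination
 * @nentries: number of entries to copy
 *
 * Copy the EBA table stored in vol into the one pointed by dst.
 */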
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
			int nentries)
{
	struct ubi_eba_table *src;
	int i;

	ubi_assert(dst && vol && vol->eba_tbl);

	src = vol->eba_tbl;

	for (i = 0; i < nentries; i++)
		dst->entries[i].pnum = src->entries[i].pnum;
}

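/**
 * ubi_eba_replace_table - assign a new EBA table to a volume
 * @vol: volume containing the EBA table to replace
 * @tbl: new EBA table
 *
 * Assign a new EBA table to the volume and release the old one.
 */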
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
	ubi_eba_destroy_table(vol->eba_tbl);
	vol->eba_tbl = tbl;
}

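/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */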
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

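/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
 * allocation failed.
 */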
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated entry is not needed - free it after the users
		 * counter has been bumped under the lock.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock tree entry exists for this eraseblock yet - insert
		 * the freshly allocated one into the RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

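/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */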
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

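/**
 * leb_read_unlock - unlock logical eraseblock after reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */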
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

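/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */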
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

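/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */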
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel: drop the reference taken by ltree_add_entry() */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

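/**
 * leb_write_unlock - unlock logical eraseblock after writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */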
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

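/**
 * ubi_eba_is_mapped - check if a LEB is mapped.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function returns true if the LEB is mapped, false otherwise.
 */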
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
	return vol->eba_tbl->entries[lnum].pnum >= 0;
}

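/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */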
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_eba_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
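/**
 * check_mapping - check and fixup a mapping
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @pnum: physical eraseblock number
 *
 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
 * operations, so if such an operation is interrupted the mapping still looks
 * good, but upon first read an ECC error is reported to the upper layer.
 * Normally this is fixed by the full scan at attach time; with Fastmap we
 * have to deal with it while reading or writing the LEB.
 */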
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	int err;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (!ubi->fast_attach)
		return 0;

	if (!vol->checkmap || test_bit(lnum, vol->checkmap))
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
	if (err > 0 && err != UBI_IO_BITFLIPS) {
		int torture = 0;

		switch (err) {
		case UBI_IO_FF:
		case UBI_IO_FF_BITFLIPS:
		case UBI_IO_BAD_HDR:
		case UBI_IO_BAD_HDR_EBADMSG:
			break;
		default:
			ubi_assert(0);
		}

		if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
			torture = 1;

		down_read(&ubi->fm_eba_sem);
		vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
		up_read(&ubi->fm_eba_sem);
		ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);

		*pnum = UBI_LEB_UNMAPPED;
	} else if (err < 0) {
		ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
			*pnum, err);

		goto out_free;
	} else {
		int found_vol_id, found_lnum;

		ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);

		vid_hdr = ubi_get_vid_hdr(vidb);
		found_vol_id = be32_to_cpu(vid_hdr->vol_id);
		found_lnum = be32_to_cpu(vid_hdr->lnum);

		if (found_lnum != lnum || found_vol_id != vol->vol_id) {
			ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
				*pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
			ubi_ro_mode(ubi);
			err = -EINVAL;
			goto out_free;
		}
	}

	set_bit(lnum, vol->checkmap);
	err = 0;

out_free:
	ubi_free_vid_buf(vidb);

	return err;
}
#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	return 0;
}
#endif

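/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */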
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out_unlock;
	}

	if (pnum == UBI_LEB_UNMAPPED) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes, for which
		 * it is an error to read an unmapped logical eraseblock.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
		if (!vidb) {
			err = -ENOMEM;
			goto out_unlock;
		}

		vid_hdr = ubi_get_vid_hdr(vidb);

		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * A positive return code means the VID header
				 * is either missing or corrupted on flash.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
					/*
					 * The PEB is referenced but carries no
					 * valid VID header. Without fastmap
					 * this cannot happen after a full scan,
					 * so treat it as a bug and go
					 * read-only. With a fastmap attach the
					 * LEB may simply have been unmapped
					 * after the fastmap was written, so
					 * report -EBADMSG and let the layer
					 * above handle it.
					 */
					if (ubi->fast_attach) {
						err = -EBADMSG;
					} else {
						err = -EINVAL;
						ubi_ro_mode(ubi);
					}
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_buf(vidb);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			scrub = 1;
		else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg(ubi, "force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
		if (crc1 != crc) {
			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_buf(vidb);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}

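/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(). But instead of
 * storing the read data into a buffer it writes to an UBI scatter gather
 * list.
 */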
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
			struct ubi_sgl *sgl, int lnum, int offset, int len,
			int check)
{
	int to_read;
	int ret;
	struct scatterlist *sg;

	for (;;) {
		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
		sg = &sgl->sg[sgl->list_pos];
		if (len < sg->length - sgl->page_pos)
			to_read = len;
		else
			to_read = sg->length - sgl->page_pos;

		ret = ubi_eba_read_leb(ubi, vol, lnum,
				       sg_virt(sg) + sgl->page_pos, offset,
				       to_read, check);
		if (ret < 0)
			return ret;

		offset += to_read;
		len -= to_read;
		if (!len) {
			sgl->page_pos += to_read;
			if (sgl->page_pos == sg->length) {
				sgl->list_pos++;
				sgl->page_pos = 0;
			}

			break;
		}

		sgl->list_pos++;
		sgl->page_pos = 0;
	}

	return ret;
}

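/**
 * try_recover_peb - try to recover from a write failure.
 * @vol: volume description object
 * @pnum: the physical eraseblock to recover
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 * @vidb: VID buffer
 * @retry: whether the caller should retry in case this function fails
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * It also writes the data which was not written due to the failure. Returns
 * zero in case of success and a negative error code in case of failure. On
 * failure, @retry tells the caller whether retrying with another physical
 * eraseblock makes sense.
 */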
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
			   const void *buf, int offset, int len,
			   struct ubi_vid_io_buf *vidb, bool *retry)
{
	struct ubi_device *ubi = vol->ubi;
	struct ubi_vid_hdr *vid_hdr;
	int new_pnum, err, vol_id = vol->vol_id, data_size;
	uint32_t crc;

	*retry = false;

	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		err = new_pnum;
		goto out_put;
	}

	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
		pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);
	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_unlock;
	}

	*retry = true;

	memcpy(ubi->peb_buf + offset, buf, len);

	data_size = offset + len;
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->copy_flag = 1;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->data_crc = cpu_to_be32(crc);
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
	if (err)
		goto out_unlock;

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);

	if (!err)
		vol->eba_tbl->entries[lnum].pnum = new_pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (!err) {
		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
		ubi_msg(ubi, "data was successfully recovered");
	} else if (new_pnum >= 0) {
		/*
		 * Writing to the new physical eraseblock failed as well, so
		 * put it back (with torture) and let the caller try another
		 * one.
		 */
		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
	}

	return err;
}

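/**
 * recover_peb - recover from a write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * It also writes the data which was not written due to the failure. Returns
 * zero in case of success and a negative error code in case of failure. It
 * tries %UBI_IO_RETRIES times before giving up.
 */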
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), tries;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_io_buf *vidb;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		bool retry;

		err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
				      &retry);
		if (!err || !retry)
			break;

		ubi_msg(ubi, "try again");
	}

	ubi_free_vid_buf(vidb);

	return err;
}

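/**
 * try_write_vid_and_data - try to write VID header and data to a new PEB.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @vidb: the VID buffer to write
 * @buf: buffer containing the data
 * @offset: where to start writing data
 * @len: how many bytes should be written
 *
 * This function tries to write VID header and data belonging to logical
 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
 * in case of success and a negative error code in case of failure. In case
 * of error, it is possible that something was still written to the flash
 * media, but may be some garbage.
 */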
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
				  struct ubi_vid_io_buf *vidb, const void *buf,
				  int offset, int len)
{
	struct ubi_device *ubi = vol->ubi;
	int pnum, opnum, err, vol_id = vol->vol_id;

	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		goto out_put;
	}

	opnum = vol->eba_tbl->entries[lnum].pnum;

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto out_put;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi,
				 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			goto out_put;
		}
	}

	vol->eba_tbl->entries[lnum].pnum = pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (err && pnum >= 0)
		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	else if (!err && opnum >= 0)
		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);

	return err;
}

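/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but may be some garbage.
 * This function retries %UBI_IO_RETRIES times before giving up.
 */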
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out;
	}

	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
		}

		goto out;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free
	 * physical eraseblock and write the volume identifier header there
	 * first.
	 */
	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		/*
		 * A write failure on a fresh PEB means the PEB went bad. Bad
		 * PEB handling is allowed, so retry with another PEB, using a
		 * fresh sequence number.
		 */
		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	ubi_free_vid_buf(vidb);

out:
	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

	return err;
}

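/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument contains the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't
 * have to be aligned to the minimal I/O unit size. Instead, it has to be
 * equivalent to the real data size, although the @buf buffer has to contain
 * the alignment. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative
 * error code in case of failure.
 */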
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, tries, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

	ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out:
	ubi_free_vid_buf(vidb);

	return err;
}

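/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically.
 * @buf has to contain the new logical eraseblock data, and @len - the length
 * of the data, which has to be aligned. This function guarantees that in
 * case of an unclean reboot the old contents is preserved. Returns zero in
 * case of success and a negative error code in case of failure.
 *
 * Only one atomic LEB change may be done at a time, which is ensured by
 * @ubi->alc_mutex.
 */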
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case: a zero-length change simply unmaps the LEB
		 * and then maps it again, leaving an empty, freshly mapped
		 * LEB behind.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

	dbg_eba("change LEB %d:%d", vol_id, lnum);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	/*
	 * If the write could not be completed even after the retries, switch
	 * the whole UBI device to read-only mode.
	 */
	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_buf(vidb);
	return err;
}

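/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error that happened while reading back the target PEB
 *
 * This is a helper function for 'ubi_eba_copy_leb()'. It returns %0 for
 * error codes the caller should propagate unchanged (%-EIO, %-ENOMEM,
 * %UBI_IO_BAD_HDR, %UBI_IO_BAD_HDR_EBADMSG and %-ETIMEDOUT) and %1 for
 * everything else, in which case the caller reports %MOVE_TARGET_RD_ERR
 * instead.
 */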
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;

	return 1;
}

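/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vidb: data structure from where the VID header is derived
 *
 * This function copies a logical eraseblock from physical eraseblock @from
 * to physical eraseblock @to. The VID header in @vidb may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 *   o a negative error code in case of failure.
 */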
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_io_buf *vidb)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	struct ubi_volume *vol;
	uint32_t crc;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since
	 * the volume deletion un-maps all the volume's logical eraseblocks,
	 * it will be locked in 'ubi_wl_put_peb()' and wait for the WL worker
	 * to finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot
	 * sleep on the LEB, since it may cause deadlocks. Indeed, imagine a
	 * task is unmapping the LEB which is mapped to the PEB we are going
	 * to move (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and would go to sleep on the LEB lock. So,
	 * if the LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reason of the contention - it may be just normal
	 * I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl->entries[lnum].pnum != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn(ubi, "error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have to calculate how much data we have to copy. In case of
	 * a static volume it is fairly easy - the VID header contains the
	 * data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut the 0xFF bytes from the end and copy
	 * only the first part. We must do this to avoid writing 0xFF bytes as
	 * it may have side-effects, and it is also important not to include
	 * those 0xFFs in the CRC because they may be filled by data later.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out that the whole @from physical eraseblock contains
	 * only 0xFF bytes. Then we have to write only the VID header and no
	 * data, which also means we should not set @vid_hdr->copy_flag,
	 * @vid_hdr->data_size and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vidb);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();
	}

	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
	vol->eba_tbl->entries[lnum].pnum = to;

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

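/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: UBI attach info object
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. It decides whether a
 * warning has to be printed: for images which have been in use for a while
 * (judged by the maximum sequence number) the warning is suppressed as long
 * as at least a tenth of the wanted reserve is available; new images always
 * get the warning.
 */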
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 threshold is simply a reasonably large sequence number
	 * used to distinguish freshly flashed images from images which have
	 * been in use for a while.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn(ubi, "%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}

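/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() triggers.
 */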
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc_array(vol->reserved_pebs,
					    sizeof(**scan_eba),
					    GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc_array(vol->reserved_pebs,
					  sizeof(**fm_eba),
					  GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

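/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */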
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		struct ubi_eba_table *tbl;

		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
		if (IS_ERR(tbl)) {
			err = PTR_ERR(tbl);
			goto out_free;
		}

		ubi_eba_replace_table(vol, tbl);

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs) {
				/*
				 * This may happen in case of an unclean
				 * reboot during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			} else {
				struct ubi_eba_entry *entry;

				entry = &vol->eba_tbl->entries[aeb->lnum];
				entry->pnum = aeb->pnum;
			}
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough PEBs to reserve the full bad PEB handling pool */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		ubi_eba_replace_table(ubi->volumes[i], NULL);
	}
	return err;
}