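/*
 * UBI wear-leveling sub-system.
 *
 * Summary of what the code below does: this unit hands out physical
 * eraseblocks (PEBs) from the "free" RB-tree, takes used PEBs back for
 * erasure, and moves data from less worn-out PEBs to more worn-out ones
 * when the difference between the lowest and the highest erase counter
 * grows beyond UBI_WL_THRESHOLD.  PEBs which were recently handed out are
 * kept in the "protection" trees for a while, so that the wear-leveling
 * worker does not immediately pick them for moving.  Potentially slow
 * operations (erasure, data moves) are queued on the @ubi->works list and
 * executed by do_work().
 */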
#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * How many erase operations a newly allocated PEB stays in the protection
 * trees, depending on the type of data the caller is going to store in it
 * (short-term, unknown and long-term, respectively).
 */
#define ST_PROTECTION 16
#define U_PROTECTION 10
#define LT_PROTECTION 4

/*
 * Maximum allowed difference between two erase counters.  If this threshold
 * is exceeded, the WL unit starts moving data from used physical eraseblocks
 * with a low erase counter to free physical eraseblocks with a high erase
 * counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is picked from the free tree, its erase counter
 * may exceed the lowest erase counter in the tree by at most this much
 * (see find_wl_entry()).
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
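
/*
 * Note on the protection mechanism implemented below: @ubi->abs_ec is an
 * "absolute" erase counter which is incremented every time any PEB is
 * erased (see erase_worker()).  When a PEB is handed out by
 * ubi_wl_get_peb(), it is put into the protection trees with an expiry
 * value of abs_ec plus one of the *_PROTECTION constants above, and
 * check_protection_over() later moves it to the used tree once enough
 * erasures have happened.  For example, a PEB taken for short-term data
 * becomes a wear-leveling candidate again only after roughly ST_PROTECTION
 * further erase operations.
 */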

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @ubi->prot.pnum RB-tree (indexed by PEB number)
 * @rb_aec: link in the @ubi->prot.aec RB-tree (indexed by absolute erase
 *          counter)
 * @abs_ec: the absolute erase counter value at which the protection ends
 * @e: the wear-leveling entry of the protected physical eraseblock
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The worker function is called with a non-zero @cancel argument if it has
 * to free its resources and exit without doing the work.  It returns zero
 * in case of success and a negative error code in case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, the WL trees (@ubi->used, @ubi->free and @ubi->scrub) use
 * (erase counter, physical eraseblock number) pairs as keys.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}
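
/*
 * Ordering example for the WL trees: entries with (EC, PEB) pairs (5, 12),
 * (5, 30) and (7, 3) are kept in the order (5, 12) < (5, 30) < (7, 3),
 * i.e. primarily by erase counter and, for equal counters, by PEB number.
 */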

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers.  Workers
	 * take it in read mode, so many of them may be doing works at a time.
	 * ubi_wl_flush() takes it in write mode to wait for all works to
	 * finish.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function.  Do not touch the work structure after
	 * this call - the worker frees or re-uses it.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works.  Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add a physical eraseblock to the protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: the protection entry object to use
 * @abs_ec: for how many erase operations this PEB should be protected
 *
 * @ubi->wl_lock has to be held by the caller.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find a wear-leveling entry with an erase counter close to
 * a certain bound.
 * @root: the RB-tree to search
 * @max: highest allowed erase counter, relative to the lowest one in the tree
 *
 * This function returns the wear-leveling entry with the largest erase
 * counter which is still smaller than the lowest erase counter in the tree
 * plus @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
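
/*
 * Worked example for find_wl_entry(): if the tree holds entries with erase
 * counters 10, 50 and 400 and @max is 200, the entry with EC 50 is returned,
 * because 400 >= 10 + 200 while 50 < 10 + 200.
 */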

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.  Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data pick a physical eraseblock with a high
		 * erase counter, but stay within %WL_FREE_MAX_DIFF of the
		 * lowest erase counter in the free tree.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data try to pick a physical eraseblock with a
		 * medium erase counter: if the spread of erase counters in
		 * the free tree is small, just take the tree root, otherwise
		 * search for an entry around the middle of the allowed range.
		 */
		first = rb_entry(rb_first(&ubi->free),
				 struct ubi_wl_entry, rb);
		last = rb_entry(rb_last(&ubi->free),
				struct ubi_wl_entry, rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		protect = U_PROTECTION;
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data pick a physical eraseblock with the
		 * lowest erase counter, as it is expected to be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free),
			     struct ubi_wl_entry, rb);
		protect = ST_PROTECTION;
		break;
	default:
		protect = 0;
		e = NULL;
		BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being picked by the wear-leveling worker for a
	 * while.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found there.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {
		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * The erase counter does not fit into the on-flash field
		 * anymore - refuse to continue.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter has been incremented, to check whether some physical eraseblocks
 * have to be moved from the protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to release,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;

	/*
	 * Execute the queued work right away instead of deferring it to a
	 * background thread.
	 */
	do_work(ubi);

	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one.  Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * Nothing to do: either there are no free physical
		 * eraseblocks to move data to, or there are no used or
		 * scrub physical eraseblocks to move data from.  Cancel
		 * this work.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock.  If the erase
		 * counters differ by at least %UBI_WL_THRESHOLD, start
		 * wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now copy the contents of PEB @e1->pnum to PEB @e2->pnum.  While
	 * @ubi->move_from and @ubi->move_to are set, ubi_wl_put_peb() and
	 * ubi_wl_scrub_peb() coordinate with this worker instead of touching
	 * these PEBs directly.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * The PEB apparently has no VID header - probably it
			 * was just allocated and has not been written yet.
			 * There is nothing to move, so put @e1 back and
			 * schedule @e2 for erasure.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * The LEB was intentionally not moved (for example, the
		 * volume is being removed).  Protect @e1 so that it is not
		 * immediately picked for wear-leveling again.
		 */
		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * The target PEB was put while we were moving data to it,
		 * schedule it for erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * The LEB was not moved.  @e1 is unchanged, so return it to the tree
	 * it came from.  @e2 may have been partially written, so schedule it
	 * for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes.  Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * Schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a
		 * high erase counter of free physical eraseblocks is not
		 * smaller than %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed.  It also takes care of marking the physical eraseblock bad if
 * needed.  Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we have erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the physical eraseblock for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	/*
	 * Free @e only after the re-schedule branch above - on that path the
	 * entry is still referenced by schedule_erase().
	 */
	kmem_cache_free(ubi_wl_entry_slab, e);

	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do.
		 * Scheduling this physical eraseblock for erasure again would
		 * cause errors again and again, so switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks.  The erasure is done asynchronously.  Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * This PEB is currently the source of a data move.  Wait for
		 * the wear-leveling worker to finish the move and then retry.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait by taking and releasing @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * This PEB is the target of a data move, and the wear-leveling
		 * worker is still writing to it.  Just set a flag - the worker
		 * will schedule the PEB for erasure when the move finishes.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->rb, &ubi->scrub);
		} else {
			err = prot_tree_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing.  This function schedules it for scrubbing,
 * which is done by the wear-leveling worker.  Returns zero in case of success
 * and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to.  The data
		 * was moved but the PEB was not yet inserted into the proper
		 * tree.  Wait a little and let the WL worker proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else {
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/* Run pending works until the queue is empty */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * In case the last work was the wear-leveling work, which resulted in
	 * schedule_erase() calls, do the erasures now.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree of wear-leveling entries.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	/* PEBs which need erasure are scheduled for it right away */
	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	/* Free PEBs go to the free tree */
	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	/* Corrupted PEBs are scheduled for erasure as well */
	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	/* The remaining PEBs go to the used or scrub trees */
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is in a
 * WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */