/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only a valid erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counters to free physical eraseblocks
 * with high erase counters.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is
 * detected in a physical eraseblock, it has to be moved and erased as soon as
 * possible. This is called scrubbing.
 *
 * As it was said, for the WL sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @ubi->free RB-tree,
 * while used eraseblocks are kept in the @ubi->used, @ubi->erroneous, or
 * @ubi->scrub RB-trees, or (temporarily) in the @ubi->pq protection queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some (small) time. This time
 * is implemented by a temporary "protection queue". The physical eraseblock
 * stays in the queue for %UBI_PROT_QUEUE_LEN erase cycles and is then moved
 * to the @ubi->used tree.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This object keeps the erase counter and the physical
 * eraseblock number, which is enough for the purposes of this sub-system.
 */
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to an unlimited wear of one or few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased right after the data is written to it. So, we have a constant which
 * limits the highest erase counter of the free physical eraseblock to pick:
 * the WL sub-system does not pick eraseblocks with erase counter greater than
 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#define paranoid_check_in_pq(ubi, e) 0
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the RB-tree. If the erase counters are equivalent, the physical eraseblock
 * numbers are compared.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
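
/*
 * For example, with the (EC, PEB) keying above, entries (EC 5, PEB 9),
 * (EC 5, PEB 2) and (EC 7, PEB 1) are ordered in the tree as
 * (5, 2) < (5, 9) < (7, 1): the erase counter decides first and the physical
 * eraseblock number breaks ties, so 'rb_first()' always yields the least
 * worn-out entry.
 */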

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Must be called with
 * @ubi->wl_lock held.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
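
/*
 * Example: with %UBI_PROT_QUEUE_LEN == 10 and @ubi->pq_head == 3, a PEB
 * added now lands in list 2, the current tail. 'serve_prot_queue()' releases
 * list 3 next, then list 4, and so on, so the new entry is released only
 * after the head has swept through all 10 lists - i.e. after roughly
 * %UBI_PROT_QUEUE_LEN erase operations.
 */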

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter, relative to the lowest erase counter
 *       in the tree
 *
 * This function looks for the wear-leveling entry whose erase counter is as
 * high as possible but still smaller than the lowest erase counter in @root
 * plus @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
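
/*
 * Example: if @root holds entries with erase counters {10, 12, 50, 400} and
 * @max is 100, the search limit becomes 10 + 100 = 110, and the entry with
 * erase counter 50 is returned - the highest-EC entry still below the limit.
 */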

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, medium_ec;
	struct ubi_wl_entry *e, *first, *last;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with high
		 * erase counter. But the highest erase counter we can pick is
		 * bounded by the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with medium
		 * erase counter. But we by no means can pick a physical
		 * eraseblock with erase counter greater than or equal to the
		 * lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
				 u.rb);
		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, u.rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
		break;
	default:
		BUG();
	}

	paranoid_check_in_wl_tree(e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				   ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err > 0 ? -EINVAL : err;
	}

	return e->pnum;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (paranoid_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
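
/*
 * Note how the protection timing falls out of this: 'erase_worker()' calls
 * 'serve_prot_queue()' once per successful erase, so one step of @pq_head
 * corresponds to one physical erase somewhere on the device. A PEB queued at
 * the tail by 'prot_queue_add()' therefore stays protected for roughly
 * %UBI_PROT_QUEUE_LEN device-wide erase operations.
 */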

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list and wakes up the background thread if it is enabled.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the "scrub" tree or being erased - cancel the wear-leveling
		 * and re-schedule it when free eraseblocks appear.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * scheduled again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->u.rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB was given out, so this PEB simply has not got
			 * one yet. Skip it and do not move it - it will get a
			 * VID header very soon and then we will be able to
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}

		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping the corresponding LEB. Instead,
			 * just put this PEB to the @ubi->erroneous list to
			 * prevent UBI from trying to move it over and over
			 * again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = schedule_erase(ubi, e2, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reasons the LEB was not moved: might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = schedule_erase(ubi, e2, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
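
/*
 * Worked example: suppose %UBI_WL_THRESHOLD is 4096, so %WL_FREE_MAX_DIFF is
 * 8192. If the least worn used PEB has EC 100 and the free tree holds ECs
 * {150, 5000, 9000}, then 'find_wl_entry()' picks EC 5000 (9000 exceeds
 * 150 + 8192). Since 5000 - 100 >= 4096, the wear-leveling worker is
 * scheduled to move the data off the EC-100 eraseblock.
 */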

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care about marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure; @e is reused here, so it
		 * must not be freed before this branch. */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, lets switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg("mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs)
		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
	else
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}
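
/*
 * Example of the reserve top-up arithmetic in 'erase_worker()': if
 * @ubi->beb_rsvd_level is 20 and @ubi->beb_rsvd_pebs is also 20, then
 * need = 20 - 20 + 1 = 1, i.e. one extra PEB is reserved up front because
 * marking the failed PEB bad is about to consume one from the pool. If
 * @ubi->avail_pebs is 0, need is clamped to 0 and the reserve cannot be
 * topped up.
 */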

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened on this PEB and it has to be tested. This function returns
 * zero in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			paranoid_check_in_wl_tree(e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock is scheduled for scrubbing. Returns zero in case of success and
 * a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case last was the WL worker and it canceled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch the device to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err, i;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->erroneous);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it is
 * not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
 *                        queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
 */
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	ubi_dbg_dump_stack();
	return 1;
}
#endif