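/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks (PEBs) and erase counters (ECs) and knows nothing
 * about logical eraseblocks, volumes, and the like. Free, used, erroneous
 * and to-be-scrubbed PEBs are kept in RB-trees sorted by erase counter,
 * and recently returned PEBs sit in a protection queue for a while before
 * they may be moved again. Erasure and wear-leveling moves are performed
 * as works, either by the per-device background thread ('ubi_thread()')
 * or synchronously when a free PEB is needed immediately.
 */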
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with
 * high erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is picked from the free tree (see
 * find_wl_entry()), its erase counter is allowed to be at most this much
 * larger than the smallest erase counter in the tree.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
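
/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */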
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
				     struct ubi_wl_entry *e,
				     struct rb_root *root);
static int paranoid_check_in_pq(const struct ubi_device *ubi,
				struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(ubi, e, root)
#define paranoid_check_in_pq(ubi, e) 0
#endif
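
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */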
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
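
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */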
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that time
	 * by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
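
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */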
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}
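
/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */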
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}
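
/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. The @ubi->wl_lock has
 * to be held by the caller.
 */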
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
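
/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter in the tree.
 */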
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
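
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */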
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err;
	struct ubi_wl_entry *e, *first, *last;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with high
		 * erase counter. But the highest erase counter we can pick is
		 * bounded by the lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with medium
		 * erase counter. But we by no means can pick a physical
		 * eraseblock with erase counter greater or equivalent than the
		 * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
		 */
		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
				 u.rb);
		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, u.rb);
		else
			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
		break;
	default:
		BUG();
	}

	paranoid_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				   ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
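
/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */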
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (paranoid_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}
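
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be checked
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */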
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
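
/**
 * serve_prot_queue - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the used tree.
 */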
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
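
/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list and wakes up the background thread if it is enabled.
 */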
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);
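
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */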
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
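
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */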
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
	rb_erase(&e2->u.rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB was given, so it has apparently not yet had a
			 * chance to be written. Add this PEB to the protection
			 * queue so far, because presumably more data will be
			 * written to it (including the missing VID header),
			 * and then we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = schedule_erase(ubi, e2, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reasons the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = schedule_erase(ubi, e2, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
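
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */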
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
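
/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care about marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */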
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, lets switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg("mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs)
		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
	else
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}
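
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened to this @pnum and it has to be tested. This function returns
 * zero in case of success, and a negative error code in case of failure.
 */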
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}
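
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing which is done in background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */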
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
				   in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}
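
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */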
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case last was the WL worker and it canceled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}
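
/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */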
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}
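
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */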
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
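
/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */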
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
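
/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */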
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err, i;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
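
/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */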
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}
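
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */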
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->erroneous);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG
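
/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */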
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi->dbg->chk_gen)
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}
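
/**
 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */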
static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
				     struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (!ubi->dbg->chk_gen)
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return -EINVAL;
}
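
/**
 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
 *                        queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */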
static int paranoid_check_in_pq(const struct ubi_device *ubi,
				struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi->dbg->chk_gen)
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	ubi_dbg_dump_stack();
	return -EINVAL;
}

#endif /* CONFIG_MTD_UBI_DEBUG */