/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy, Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks (PEBs) and erase counters and knows nothing about
 * logical eraseblocks or volumes. It maintains RB-trees of free, used,
 * scrubbing and erroneous PEBs, a protection queue of recently allocated
 * PEBs, and a list of pending works (erasure, wear-leveling moves) which
 * is processed by the background thread or synchronously by the callers.
 */
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"
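/* Number of physical eraseblocks reserved for wear-leveling purposes */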
#define WL_RESERVED_PEBS 1
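/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with
 * high erase counters.
 */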
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
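/*
 * When a physical eraseblock is picked from the free tree, its erase counter
 * is allowed to exceed the smallest erase counter in the tree by at most
 * WL_FREE_MAX_DIFF; see find_wl_entry() and find_mean_wl_entry().
 */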
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
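/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */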
#define WL_MAX_FAILURES 32
124
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);
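/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */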
139static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
140{
141 struct rb_node **p, *parent = NULL;
142
143 p = &root->rb_node;
144 while (*p) {
145 struct ubi_wl_entry *e1;
146
147 parent = *p;
148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
149
150 if (e->ec < e1->ec)
151 p = &(*p)->rb_left;
152 else if (e->ec > e1->ec)
153 p = &(*p)->rb_right;
154 else {
155 ubi_assert(e->pnum != e1->pnum);
156 if (e->pnum < e1->pnum)
157 p = &(*p)->rb_left;
158 else
159 p = &(*p)->rb_right;
160 }
161 }
162
163 rb_link_node(&e->u.rb, parent, p);
164 rb_insert_color(&e->u.rb, root);
165}
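/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear-leveling entry and removes the reference
 * from the lookup table.
 */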
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}
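/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */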
188static int do_work(struct ubi_device *ubi)
189{
190 int err;
191 struct ubi_work *wrk;
192
193 cond_resched();
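	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */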
201 down_read(&ubi->work_sem);
202 spin_lock(&ubi->wl_lock);
203 if (list_empty(&ubi->works)) {
204 spin_unlock(&ubi->wl_lock);
205 up_read(&ubi->work_sem);
206 return 0;
207 }
208
209 wrk = list_entry(ubi->works.next, struct ubi_work, list);
210 list_del(&wrk->list);
211 ubi->works_count -= 1;
212 ubi_assert(ubi->works_count >= 0);
213 spin_unlock(&ubi->wl_lock);
220 err = wrk->func(ubi, wrk, 0);
221 if (err)
222 ubi_err(ubi, "work failed with error code %d", err);
223 up_read(&ubi->work_sem);
224
225 return err;
226}
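/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */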
236static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
237{
238 struct rb_node *p;
239
240 p = root->rb_node;
241 while (p) {
242 struct ubi_wl_entry *e1;
243
244 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
245
246 if (e->pnum == e1->pnum) {
247 ubi_assert(e == e1);
248 return 1;
249 }
250
251 if (e->ec < e1->ec)
252 p = p->rb_left;
253 else if (e->ec > e1->ec)
254 p = p->rb_right;
255 else {
256 ubi_assert(e->pnum != e1->pnum);
257 if (e->pnum < e1->pnum)
258 p = p->rb_left;
259 else
260 p = p->rb_right;
261 }
262 }
263
264 return 0;
265}
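/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */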
275static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
276{
277 struct ubi_wl_entry *p;
278 int i;
279
280 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
281 list_for_each_entry(p, &ubi->pq[i], u.list)
282 if (p == e)
283 return 1;
284
285 return 0;
286}
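/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Called with
 * @ubi->wl_lock held.
 */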
298static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
299{
300 int pq_tail = ubi->pq_head - 1;
301
302 if (pq_tail < 0)
303 pq_tail = UBI_PROT_QUEUE_LEN - 1;
304 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
305 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
306 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
307}
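/**
 * find_wl_entry - find wear-leveling entry closest to a certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with an erase counter closest
 * to, but smaller than, the smallest erase counter in the tree plus @diff.
 */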
318static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
319 struct rb_root *root, int diff)
320{
321 struct rb_node *p;
322 struct ubi_wl_entry *e, *prev_e = NULL;
323 int max;
324
325 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
326 max = e->ec + diff;
327
328 p = root->rb_node;
329 while (p) {
330 struct ubi_wl_entry *e1;
331
332 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
333 if (e1->ec >= max)
334 p = p->rb_left;
335 else {
336 p = p->rb_right;
337 prev_e = e;
338 e = e1;
339 }
340 }
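	/*
	 * If no fastmap has been written and this WL entry can be used as
	 * anchor PEB, hold it back and return the second best WL entry such
	 * that fastmap can use the anchor PEB later.
	 */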
345 if (prev_e && !ubi->fm_disabled &&
346 !ubi->fm && e->pnum < UBI_FM_MAX_START)
347 return prev_e;
348
349 return e;
350}
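/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear-leveling entry with a medium erase counter,
 * but not greater than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */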
361static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
362 struct rb_root *root)
363{
364 struct ubi_wl_entry *e, *first, *last;
365
366 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
367 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
368
369 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
370 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
375 e = may_reserve_for_fm(ubi, e, root);
376 } else
377 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
378
379 return e;
380}
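/**
 * wl_get_wle - get a wear-leveling entry from the free tree.
 * @ubi: UBI device description object
 *
 * This function returns a wear-leveling entry in case of success and
 * NULL in case of failure.
 */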
390static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
391{
392 struct ubi_wl_entry *e;
393
394 e = find_mean_wl_entry(ubi, &ubi->free);
395 if (!e) {
396 ubi_err(ubi, "no free eraseblocks");
397 return NULL;
398 }
399
400 self_check_in_wl_tree(ubi, e, &ubi->free);
406 rb_erase(&e->u.rb, &ubi->free);
407 ubi->free_count--;
408 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
409
410 return e;
411}
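/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */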
421static int prot_queue_del(struct ubi_device *ubi, int pnum)
422{
423 struct ubi_wl_entry *e;
424
425 e = ubi->lookuptbl[pnum];
426 if (!e)
427 return -ENODEV;
428
429 if (self_check_in_pq(ubi, e))
430 return -ENODEV;
431
432 list_del(&e->u.list);
433 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
434 return 0;
435}
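/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */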
446static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
447 int torture)
448{
449 int err;
450 struct ubi_ec_hdr *ec_hdr;
451 unsigned long long ec = e->ec;
452
453 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
454
455 err = self_check_ec(ubi, e->pnum, e->ec);
456 if (err)
457 return -EINVAL;
458
459 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
460 if (!ec_hdr)
461 return -ENOMEM;
462
463 err = ubi_io_sync_erase(ubi, e->pnum, torture);
464 if (err < 0)
465 goto out_free;
466
467 ec += err;
468 if (ec > UBI_MAX_ERASECOUNTER) {
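		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */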
473 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
474 e->pnum, ec);
475 err = -EINVAL;
476 goto out_free;
477 }
478
479 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
480
481 ec_hdr->ec = cpu_to_be64(ec);
482
483 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
484 if (err)
485 goto out_free;
486
487 e->ec = ec;
488 spin_lock(&ubi->wl_lock);
489 if (e->ec > ubi->max_ec)
490 ubi->max_ec = e->ec;
491 spin_unlock(&ubi->wl_lock);
492
493out_free:
494 kfree(ec_hdr);
495 return err;
496}
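/**
 * serve_prot_queue - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and are moved to the @ubi->used tree.
 */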
506static void serve_prot_queue(struct ubi_device *ubi)
507{
508 struct ubi_wl_entry *e, *tmp;
509 int count;
515repeat:
516 count = 0;
517 spin_lock(&ubi->wl_lock);
518 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
519 dbg_wl("PEB %d EC %d protection over, move to used tree",
520 e->pnum, e->ec);
521
522 list_del(&e->u.list);
523 wl_tree_add(e, &ubi->used);
524 if (count++ > 32) {
529 spin_unlock(&ubi->wl_lock);
530 cond_resched();
531 goto repeat;
532 }
533 }
534
535 ubi->pq_head += 1;
536 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
537 ubi->pq_head = 0;
538 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
539 spin_unlock(&ubi->wl_lock);
540}
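/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the end of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode.
 */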
550static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
551{
552 spin_lock(&ubi->wl_lock);
553 list_add_tail(&wrk->list, &ubi->works);
554 ubi_assert(ubi->works_count >= 0);
555 ubi->works_count += 1;
556 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
557 wake_up_process(ubi->bgt_thread);
558 spin_unlock(&ubi->wl_lock);
559}
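/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the end of the pending works
 * list.
 */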
569static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
570{
571 down_read(&ubi->work_sem);
572 __schedule_ubi_work(ubi, wrk);
573 up_read(&ubi->work_sem);
574}
575
576static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
577 int shutdown);
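/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: denotes whether @ubi->work_sem is already held in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */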
590static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
591 int vol_id, int lnum, int torture, bool nested)
592{
593 struct ubi_work *wl_wrk;
594
595 ubi_assert(e);
596
597 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
598 e->pnum, e->ec, torture);
599
600 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
601 if (!wl_wrk)
602 return -ENOMEM;
603
604 wl_wrk->func = &erase_worker;
605 wl_wrk->e = e;
606 wl_wrk->vol_id = vol_id;
607 wl_wrk->lnum = lnum;
608 wl_wrk->torture = torture;
609
610 if (nested)
611 __schedule_ubi_work(ubi, wl_wrk);
612 else
613 schedule_ubi_work(ubi, wl_wrk);
614 return 0;
615}
616
617static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
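/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */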
627static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
628 int vol_id, int lnum, int torture)
629{
630 struct ubi_work wl_wrk;
631
632 dbg_wl("sync erase of PEB %i", e->pnum);
633
634 wl_wrk.e = e;
635 wl_wrk.vol_id = vol_id;
636 wl_wrk.lnum = lnum;
637 wl_wrk.torture = torture;
638
639 return __erase_worker(ubi, &wl_wrk);
640}
641
642static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
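/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL sub-system is shutting down
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */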
654static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
655 int shutdown)
656{
657 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
658 int erase = 0, keep = 0, vol_id = -1, lnum = -1;
659#ifdef CONFIG_MTD_UBI_FASTMAP
660 int anchor = wrk->anchor;
661#endif
662 struct ubi_wl_entry *e1, *e2;
663 struct ubi_vid_io_buf *vidb;
664 struct ubi_vid_hdr *vid_hdr;
665 int dst_leb_clean = 0;
666
667 kfree(wrk);
668 if (shutdown)
669 return 0;
670
671 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
672 if (!vidb)
673 return -ENOMEM;
674
675 vid_hdr = ubi_get_vid_hdr(vidb);
676
677 down_read(&ubi->fm_eba_sem);
678 mutex_lock(&ubi->move_mutex);
679 spin_lock(&ubi->wl_lock);
680 ubi_assert(!ubi->move_from && !ubi->move_to);
681 ubi_assert(!ubi->move_to_put);
682
683 if (!ubi->free.rb_node ||
684 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
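		/*
		 * No free physical eraseblocks? They must be waiting in the
		 * queue to be erased - cancel the movement, it will be
		 * triggered again when a free PEB appears. No used physical
		 * eraseblocks? They must be temporarily protected; they will
		 * be moved to @ubi->used later and wear-leveling will be
		 * triggered again.
		 */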
695 dbg_wl("cancel WL, a list is empty: free %d, used %d",
696 !ubi->free.rb_node, !ubi->used.rb_node);
697 goto out_cancel;
698 }
699
700#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
702 if (!anchor)
703 anchor = !anchor_pebs_available(&ubi->free);
704
705 if (anchor) {
706 e1 = find_anchor_wl_entry(&ubi->used);
707 if (!e1)
708 goto out_cancel;
709 e2 = get_peb_for_wl(ubi);
710 if (!e2)
711 goto out_cancel;
712
713 self_check_in_wl_tree(ubi, e1, &ubi->used);
714 rb_erase(&e1->u.rb, &ubi->used);
715 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
716 } else if (!ubi->scrub.rb_node) {
717#else
718 if (!ubi->scrub.rb_node) {
719#endif
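		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */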
725 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
726 e2 = get_peb_for_wl(ubi);
727 if (!e2)
728 goto out_cancel;
729
730 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
731 dbg_wl("no WL needed: min used EC %d, max free EC %d",
732 e1->ec, e2->ec);
733
734
735 wl_tree_add(e2, &ubi->free);
736 ubi->free_count++;
737 goto out_cancel;
738 }
739 self_check_in_wl_tree(ubi, e1, &ubi->used);
740 rb_erase(&e1->u.rb, &ubi->used);
741 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
742 e1->pnum, e1->ec, e2->pnum, e2->ec);
743 } else {
744
745 scrubbing = 1;
746 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
747 e2 = get_peb_for_wl(ubi);
748 if (!e2)
749 goto out_cancel;
750
751 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
752 rb_erase(&e1->u.rb, &ubi->scrub);
753 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
754 }
755
756 ubi->move_from = e1;
757 ubi->move_to = e2;
758 spin_unlock(&ubi->wl_lock);
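	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We do not yet know which logical eraseblock our physical eraseblock
	 * (@e1) belongs to, so we have to read the volume identifier header
	 * first.
	 */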
771 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
772 if (err && err != UBI_IO_BITFLIPS) {
773 dst_leb_clean = 1;
774 if (err == UBI_IO_FF) {
785 dbg_wl("PEB %d has no VID header", e1->pnum);
786 protect = 1;
787 goto out_not_moved;
788 } else if (err == UBI_IO_FF_BITFLIPS) {
794 dbg_wl("PEB %d has no VID header but has bit-flips",
795 e1->pnum);
796 scrubbing = 1;
797 goto out_not_moved;
798 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
804 dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
805 e1->pnum);
806 erase = 1;
807 goto out_not_moved;
808 }
809
810 ubi_err(ubi, "error %d while reading VID header from PEB %d",
811 err, e1->pnum);
812 goto out_error;
813 }
814
815 vol_id = be32_to_cpu(vid_hdr->vol_id);
816 lnum = be32_to_cpu(vid_hdr->lnum);
817
818 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
819 if (err) {
820 if (err == MOVE_CANCEL_RACE) {
828 protect = 1;
829 dst_leb_clean = 1;
830 goto out_not_moved;
831 }
832 if (err == MOVE_RETRY) {
833 scrubbing = 1;
834 dst_leb_clean = 1;
835 goto out_not_moved;
836 }
837 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
838 err == MOVE_TARGET_RD_ERR) {
842 torture = 1;
843 keep = 1;
844 goto out_not_moved;
845 }
846
847 if (err == MOVE_SOURCE_RD_ERR) {
856 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
857 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
858 ubi->erroneous_peb_count);
859 goto out_error;
860 }
861 dst_leb_clean = 1;
862 erroneous = 1;
863 goto out_not_moved;
864 }
865
866 if (err < 0)
867 goto out_error;
868
869 ubi_assert(0);
870 }
871
872
873 if (scrubbing)
874 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
875 e1->pnum, vol_id, lnum, e2->pnum);
876 ubi_free_vid_buf(vidb);
877
878 spin_lock(&ubi->wl_lock);
879 if (!ubi->move_to_put) {
880 wl_tree_add(e2, &ubi->used);
881 e2 = NULL;
882 }
883 ubi->move_from = ubi->move_to = NULL;
884 ubi->move_to_put = ubi->wl_scheduled = 0;
885 spin_unlock(&ubi->wl_lock);
886
887 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
888 if (err) {
889 if (e2)
890 wl_entry_destroy(ubi, e2);
891 goto out_ro;
892 }
893
894 if (e2) {
899 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
900 e2->pnum, vol_id, lnum);
901 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
902 if (err)
903 goto out_ro;
904 }
905
906 dbg_wl("done");
907 mutex_unlock(&ubi->move_mutex);
908 up_read(&ubi->fm_eba_sem);
909 return 0;
910
916out_not_moved:
917 if (vol_id != -1)
918 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
919 e1->pnum, vol_id, lnum, e2->pnum, err);
920 else
921 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
922 e1->pnum, e2->pnum, err);
923 spin_lock(&ubi->wl_lock);
924 if (protect)
925 prot_queue_add(ubi, e1);
926 else if (erroneous) {
927 wl_tree_add(e1, &ubi->erroneous);
928 ubi->erroneous_peb_count += 1;
929 } else if (scrubbing)
930 wl_tree_add(e1, &ubi->scrub);
931 else if (keep)
932 wl_tree_add(e1, &ubi->used);
933 if (dst_leb_clean) {
934 wl_tree_add(e2, &ubi->free);
935 ubi->free_count++;
936 }
937
938 ubi_assert(!ubi->move_to_put);
939 ubi->move_from = ubi->move_to = NULL;
940 ubi->wl_scheduled = 0;
941 spin_unlock(&ubi->wl_lock);
942
943 ubi_free_vid_buf(vidb);
944 if (dst_leb_clean) {
945 ensure_wear_leveling(ubi, 1);
946 } else {
947 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
948 if (err)
949 goto out_ro;
950 }
951
952 if (erase) {
953 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
954 if (err)
955 goto out_ro;
956 }
957
958 mutex_unlock(&ubi->move_mutex);
959 up_read(&ubi->fm_eba_sem);
960 return 0;
961
out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
969 spin_lock(&ubi->wl_lock);
970 ubi->move_from = ubi->move_to = NULL;
971 ubi->move_to_put = ubi->wl_scheduled = 0;
972 spin_unlock(&ubi->wl_lock);
973
974 ubi_free_vid_buf(vidb);
975 wl_entry_destroy(ubi, e1);
976 wl_entry_destroy(ubi, e2);
977
978out_ro:
979 ubi_ro_mode(ubi);
980 mutex_unlock(&ubi->move_mutex);
981 up_read(&ubi->fm_eba_sem);
982 ubi_assert(err != 0);
983 return err < 0 ? err : -EIO;
984
985out_cancel:
986 ubi->wl_scheduled = 0;
987 spin_unlock(&ubi->wl_lock);
988 mutex_unlock(&ubi->move_mutex);
989 up_read(&ubi->fm_eba_sem);
990 ubi_free_vid_buf(vidb);
991 return 0;
992}
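/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. Returns zero in case of success and a negative error code in case
 * of failure.
 */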
1003static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1004{
1005 int err = 0;
1006 struct ubi_wl_entry *e1;
1007 struct ubi_wl_entry *e2;
1008 struct ubi_work *wrk;
1009
1010 spin_lock(&ubi->wl_lock);
1011 if (ubi->wl_scheduled)
1012
1013 goto out_unlock;
1014
1019 if (!ubi->scrub.rb_node) {
1020 if (!ubi->used.rb_node || !ubi->free.rb_node)
1021
1022 goto out_unlock;
1023
1030 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1031 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1032
1033 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1034 goto out_unlock;
1035 dbg_wl("schedule wear-leveling");
1036 } else
1037 dbg_wl("schedule scrubbing");
1038
1039 ubi->wl_scheduled = 1;
1040 spin_unlock(&ubi->wl_lock);
1041
1042 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1043 if (!wrk) {
1044 err = -ENOMEM;
1045 goto out_cancel;
1046 }
1047
1048 wrk->anchor = 0;
1049 wrk->func = &wear_leveling_worker;
1050 if (nested)
1051 __schedule_ubi_work(ubi, wrk);
1052 else
1053 schedule_ubi_work(ubi, wrk);
1054 return err;
1055
1056out_cancel:
1057 spin_lock(&ubi->wl_lock);
1058 ubi->wl_scheduled = 0;
1059out_unlock:
1060 spin_unlock(&ubi->wl_lock);
1061 return err;
1062}
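/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */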
1076static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1077{
1078 struct ubi_wl_entry *e = wl_wrk->e;
1079 int pnum = e->pnum;
1080 int vol_id = wl_wrk->vol_id;
1081 int lnum = wl_wrk->lnum;
1082 int err, available_consumed = 0;
1083
1084 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1085 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1086
1087 err = sync_erase(ubi, e, wl_wrk->torture);
1088 if (!err) {
1089 spin_lock(&ubi->wl_lock);
1090 wl_tree_add(e, &ubi->free);
1091 ubi->free_count++;
1092 spin_unlock(&ubi->wl_lock);
1098 serve_prot_queue(ubi);
1099
1100
1101 err = ensure_wear_leveling(ubi, 1);
1102 return err;
1103 }
1104
1105 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1106
1107 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1108 err == -EBUSY) {
1109 int err1;
1110
1111
1112 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1113 if (err1) {
1114 wl_entry_destroy(ubi, e);
1115 err = err1;
1116 goto out_ro;
1117 }
1118 return err;
1119 }
1120
1121 wl_entry_destroy(ubi, e);
1122 if (err != -EIO)
1128 goto out_ro;
1129
1130
1131
1132 if (!ubi->bad_allowed) {
1133 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1134 goto out_ro;
1135 }
1136
1137 spin_lock(&ubi->volumes_lock);
1138 if (ubi->beb_rsvd_pebs == 0) {
1139 if (ubi->avail_pebs == 0) {
1140 spin_unlock(&ubi->volumes_lock);
1141 ubi_err(ubi, "no reserved/available physical eraseblocks");
1142 goto out_ro;
1143 }
1144 ubi->avail_pebs -= 1;
1145 available_consumed = 1;
1146 }
1147 spin_unlock(&ubi->volumes_lock);
1148
1149 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1150 err = ubi_io_mark_bad(ubi, pnum);
1151 if (err)
1152 goto out_ro;
1153
1154 spin_lock(&ubi->volumes_lock);
1155 if (ubi->beb_rsvd_pebs > 0) {
1156 if (available_consumed) {
1161 ubi->avail_pebs += 1;
1162 available_consumed = 0;
1163 }
1164 ubi->beb_rsvd_pebs -= 1;
1165 }
1166 ubi->bad_peb_count += 1;
1167 ubi->good_peb_count -= 1;
1168 ubi_calculate_reserved(ubi);
1169 if (available_consumed)
1170 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1171 else if (ubi->beb_rsvd_pebs)
1172 ubi_msg(ubi, "%d PEBs left in the reserve",
1173 ubi->beb_rsvd_pebs);
1174 else
1175 ubi_warn(ubi, "last PEB from the reserve was used");
1176 spin_unlock(&ubi->volumes_lock);
1177
1178 return err;
1179
1180out_ro:
1181 if (available_consumed) {
1182 spin_lock(&ubi->volumes_lock);
1183 ubi->avail_pebs += 1;
1184 spin_unlock(&ubi->volumes_lock);
1185 }
1186 ubi_ro_mode(ubi);
1187 return err;
1188}
1189
1190static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1191 int shutdown)
1192{
1193 int ret;
1194
1195 if (shutdown) {
1196 struct ubi_wl_entry *e = wl_wrk->e;
1197
1198 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1199 kfree(wl_wrk);
1200 wl_entry_destroy(ubi, e);
1201 return 0;
1202 }
1203
1204 ret = __erase_worker(ubi, wl_wrk);
1205 kfree(wl_wrk);
1206 return ret;
1207}
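/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */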
1222int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1223 int pnum, int torture)
1224{
1225 int err;
1226 struct ubi_wl_entry *e;
1227
1228 dbg_wl("PEB %d", pnum);
1229 ubi_assert(pnum >= 0);
1230 ubi_assert(pnum < ubi->peb_count);
1231
1232 down_read(&ubi->fm_protect);
1233
1234retry:
1235 spin_lock(&ubi->wl_lock);
1236 e = ubi->lookuptbl[pnum];
1237 if (e == ubi->move_from) {
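		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. The move will be cancelled.
		 */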
1243 dbg_wl("PEB %d is being moved, wait", pnum);
1244 spin_unlock(&ubi->wl_lock);
1245
1246
1247 mutex_lock(&ubi->move_mutex);
1248 mutex_unlock(&ubi->move_mutex);
1249 goto retry;
1250 } else if (e == ubi->move_to) {
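		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. The EBA sub-system has
		 * already re-mapped the LEB, but the WL sub-system has not yet
		 * put the PEB to the "used" tree, so set a flag telling the WL
		 * worker to schedule this PEB for erasure instead.
		 */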
1260 dbg_wl("PEB %d is the target of data moving", pnum);
1261 ubi_assert(!ubi->move_to_put);
1262 ubi->move_to_put = 1;
1263 spin_unlock(&ubi->wl_lock);
1264 up_read(&ubi->fm_protect);
1265 return 0;
1266 } else {
1267 if (in_wl_tree(e, &ubi->used)) {
1268 self_check_in_wl_tree(ubi, e, &ubi->used);
1269 rb_erase(&e->u.rb, &ubi->used);
1270 } else if (in_wl_tree(e, &ubi->scrub)) {
1271 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1272 rb_erase(&e->u.rb, &ubi->scrub);
1273 } else if (in_wl_tree(e, &ubi->erroneous)) {
1274 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1275 rb_erase(&e->u.rb, &ubi->erroneous);
1276 ubi->erroneous_peb_count -= 1;
1277 ubi_assert(ubi->erroneous_peb_count >= 0);
1278
1279 torture = 1;
1280 } else {
1281 err = prot_queue_del(ubi, e->pnum);
1282 if (err) {
1283 ubi_err(ubi, "PEB %d not found", pnum);
1284 ubi_ro_mode(ubi);
1285 spin_unlock(&ubi->wl_lock);
1286 up_read(&ubi->fm_protect);
1287 return err;
1288 }
1289 }
1290 }
1291 spin_unlock(&ubi->wl_lock);
1292
1293 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1294 if (err) {
1295 spin_lock(&ubi->wl_lock);
1296 wl_tree_add(e, &ubi->used);
1297 spin_unlock(&ubi->wl_lock);
1298 }
1299
1300 up_read(&ubi->fm_protect);
1301 return err;
1302}
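/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing, which is done in the background. It returns zero in case of
 * success and a negative error code in case of failure.
 */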
1314int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1315{
1316 struct ubi_wl_entry *e;
1317
1318 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1319
1320retry:
1321 spin_lock(&ubi->wl_lock);
1322 e = ubi->lookuptbl[pnum];
1323 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1324 in_wl_tree(e, &ubi->erroneous)) {
1325 spin_unlock(&ubi->wl_lock);
1326 return 0;
1327 }
1328
1329 if (e == ubi->move_to) {
1336 spin_unlock(&ubi->wl_lock);
1337 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1338 yield();
1339 goto retry;
1340 }
1341
1342 if (in_wl_tree(e, &ubi->used)) {
1343 self_check_in_wl_tree(ubi, e, &ubi->used);
1344 rb_erase(&e->u.rb, &ubi->used);
1345 } else {
1346 int err;
1347
1348 err = prot_queue_del(ubi, e->pnum);
1349 if (err) {
1350 ubi_err(ubi, "PEB %d not found", pnum);
1351 ubi_ro_mode(ubi);
1352 spin_unlock(&ubi->wl_lock);
1353 return err;
1354 }
1355 }
1356
1357 wl_tree_add(e, &ubi->scrub);
1358 spin_unlock(&ubi->wl_lock);
1359
1364 return ensure_wear_leveling(ubi, 0);
1365}
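/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, it acts
 * as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */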
1379int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1380{
1381 int err = 0;
1382 int found = 1;
1383
1388 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1389 vol_id, lnum, ubi->works_count);
1390
1391 while (found) {
1392 struct ubi_work *wrk, *tmp;
1393 found = 0;
1394
1395 down_read(&ubi->work_sem);
1396 spin_lock(&ubi->wl_lock);
1397 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1398 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1399 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1400 list_del(&wrk->list);
1401 ubi->works_count -= 1;
1402 ubi_assert(ubi->works_count >= 0);
1403 spin_unlock(&ubi->wl_lock);
1404
1405 err = wrk->func(ubi, wrk, 0);
1406 if (err) {
1407 up_read(&ubi->work_sem);
1408 return err;
1409 }
1410
1411 spin_lock(&ubi->wl_lock);
1412 found = 1;
1413 break;
1414 }
1415 }
1416 spin_unlock(&ubi->wl_lock);
1417 up_read(&ubi->work_sem);
1418 }
1419
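	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */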
1424 down_write(&ubi->work_sem);
1425 up_write(&ubi->work_sem);
1426
1427 return err;
1428}
1429
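/**
 * scrub_possible - check whether a PEB can be scheduled for scrubbing.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 */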
1430static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1431{
1432 if (in_wl_tree(e, &ubi->scrub))
1433 return false;
1434 else if (in_wl_tree(e, &ubi->erroneous))
1435 return false;
1436 else if (ubi->move_from == e)
1437 return false;
1438 else if (ubi->move_to == e)
1439 return false;
1440
1441 return true;
1442}
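/**
 * ubi_bitflip_check - check an eraseblock for bitflips and scrub it if needed.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to check
 * @force: don't read the block, assume bitflips happened and take action
 *
 * This function reads the given eraseblock and checks if bitflips occurred.
 * In case of bitflips, the eraseblock is scheduled for scrubbing. It returns
 * zero in case of success and a negative error code in case of failure.
 */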
1463int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1464{
1465 int err = 0;
1466 struct ubi_wl_entry *e;
1467
1468 if (pnum < 0 || pnum >= ubi->peb_count) {
1469 err = -EINVAL;
1470 goto out;
1471 }
1472
1477 down_write(&ubi->work_sem);
1478
1483 spin_lock(&ubi->wl_lock);
1484 e = ubi->lookuptbl[pnum];
1485 if (!e) {
1486 spin_unlock(&ubi->wl_lock);
1487 err = -ENOENT;
1488 goto out_resume;
1489 }
1490
1494 if (!scrub_possible(ubi, e)) {
1495 spin_unlock(&ubi->wl_lock);
1496 err = -EBUSY;
1497 goto out_resume;
1498 }
1499 spin_unlock(&ubi->wl_lock);
1500
1501 if (!force) {
1502 mutex_lock(&ubi->buf_mutex);
1503 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1504 mutex_unlock(&ubi->buf_mutex);
1505 }
1506
1507 if (force || err == UBI_IO_BITFLIPS) {
1511 spin_lock(&ubi->wl_lock);
1517 e = ubi->lookuptbl[pnum];
1518 if (!e) {
1519 spin_unlock(&ubi->wl_lock);
1520 err = -ENOENT;
1521 goto out_resume;
1522 }
1523
1527 if (!scrub_possible(ubi, e)) {
1528 spin_unlock(&ubi->wl_lock);
1529 err = -EBUSY;
1530 goto out_resume;
1531 }
1532
1533 if (in_pq(ubi, e)) {
1534 prot_queue_del(ubi, e->pnum);
1535 wl_tree_add(e, &ubi->scrub);
1536 spin_unlock(&ubi->wl_lock);
1537
1538 err = ensure_wear_leveling(ubi, 1);
1539 } else if (in_wl_tree(e, &ubi->used)) {
1540 rb_erase(&e->u.rb, &ubi->used);
1541 wl_tree_add(e, &ubi->scrub);
1542 spin_unlock(&ubi->wl_lock);
1543
1544 err = ensure_wear_leveling(ubi, 1);
1545 } else if (in_wl_tree(e, &ubi->free)) {
1546 rb_erase(&e->u.rb, &ubi->free);
1547 ubi->free_count--;
1548 spin_unlock(&ubi->wl_lock);
1549
1554 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1555 force ? 0 : 1, true);
1556 } else {
1557 spin_unlock(&ubi->wl_lock);
1558 err = -EAGAIN;
1559 }
1560
1561 if (!err && !force)
1562 err = -EUCLEAN;
1563 } else {
1564 err = 0;
1565 }
1566
1567out_resume:
1568 up_write(&ubi->work_sem);
1569out:
1570
1571 return err;
1572}
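/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */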
1579static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1580{
1581 struct rb_node *rb;
1582 struct ubi_wl_entry *e;
1583
1584 rb = root->rb_node;
1585 while (rb) {
1586 if (rb->rb_left)
1587 rb = rb->rb_left;
1588 else if (rb->rb_right)
1589 rb = rb->rb_right;
1590 else {
1591 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1592
1593 rb = rb_parent(rb);
1594 if (rb) {
1595 if (rb->rb_left == &e->u.rb)
1596 rb->rb_left = NULL;
1597 else
1598 rb->rb_right = NULL;
1599 }
1600
1601 wl_entry_destroy(ubi, e);
1602 }
1603 }
1604}
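/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */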
1610int ubi_thread(void *u)
1611{
1612 int failures = 0;
1613 struct ubi_device *ubi = u;
1614
1615 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1616 ubi->bgt_name, task_pid_nr(current));
1617
1618 set_freezable();
1619 for (;;) {
1620 int err;
1621
1622 if (kthread_should_stop())
1623 break;
1624
1625 if (try_to_freeze())
1626 continue;
1627
1628 spin_lock(&ubi->wl_lock);
1629 if (list_empty(&ubi->works) || ubi->ro_mode ||
1630 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1631 set_current_state(TASK_INTERRUPTIBLE);
1632 spin_unlock(&ubi->wl_lock);
1633 schedule();
1634 continue;
1635 }
1636 spin_unlock(&ubi->wl_lock);
1637
1638 err = do_work(ubi);
1639 if (err) {
1640 ubi_err(ubi, "%s: work failed with error code %d",
1641 ubi->bgt_name, err);
1642 if (failures++ > WL_MAX_FAILURES) {
1647 ubi_msg(ubi, "%s: %d consecutive failures",
1648 ubi->bgt_name, WL_MAX_FAILURES);
1649 ubi_ro_mode(ubi);
1650 ubi->thread_enabled = 0;
1651 continue;
1652 }
1653 } else
1654 failures = 0;
1655
1656 cond_resched();
1657 }
1658
1659 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1660 ubi->thread_enabled = 0;
1661 return 0;
1662}
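/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */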
1668static void shutdown_work(struct ubi_device *ubi)
1669{
1670 while (!list_empty(&ubi->works)) {
1671 struct ubi_work *wrk;
1672
1673 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1674 list_del(&wrk->list);
1675 wrk->func(ubi, wrk, 1);
1676 ubi->works_count -= 1;
1677 ubi_assert(ubi->works_count >= 0);
1678 }
1679}
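/**
 * erase_aeb - erase a PEB given in UBI attach info PEB.
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: if true, erase synchronously; otherwise schedule for erasure
 */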
1687static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1688{
1689 struct ubi_wl_entry *e;
1690 int err;
1691
1692 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1693 if (!e)
1694 return -ENOMEM;
1695
1696 e->pnum = aeb->pnum;
1697 e->ec = aeb->ec;
1698 ubi->lookuptbl[e->pnum] = e;
1699
1700 if (sync) {
1701 err = sync_erase(ubi, e, false);
1702 if (err)
1703 goto out_free;
1704
1705 wl_tree_add(e, &ubi->free);
1706 ubi->free_count++;
1707 } else {
1708 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1709 if (err)
1710 goto out_free;
1711 }
1712
1713 return 0;
1714
1715out_free:
1716 wl_entry_destroy(ubi, e);
1717
1718 return err;
1719}
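/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */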
1729int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1730{
1731 int err, i, reserved_pebs, found_pebs = 0;
1732 struct rb_node *rb1, *rb2;
1733 struct ubi_ainf_volume *av;
1734 struct ubi_ainf_peb *aeb, *tmp;
1735 struct ubi_wl_entry *e;
1736
1737 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1738 spin_lock_init(&ubi->wl_lock);
1739 mutex_init(&ubi->move_mutex);
1740 init_rwsem(&ubi->work_sem);
1741 ubi->max_ec = ai->max_ec;
1742 INIT_LIST_HEAD(&ubi->works);
1743
1744 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1745
1746 err = -ENOMEM;
1747 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1748 if (!ubi->lookuptbl)
1749 return err;
1750
1751 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1752 INIT_LIST_HEAD(&ubi->pq[i]);
1753 ubi->pq_head = 0;
1754
1755 ubi->free_count = 0;
1756 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1757 cond_resched();
1758
1759 err = erase_aeb(ubi, aeb, false);
1760 if (err)
1761 goto out_free;
1762
1763 found_pebs++;
1764 }
1765
1766 list_for_each_entry(aeb, &ai->free, u.list) {
1767 cond_resched();
1768
1769 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1770 if (!e) {
1771 err = -ENOMEM;
1772 goto out_free;
1773 }
1774
1775 e->pnum = aeb->pnum;
1776 e->ec = aeb->ec;
1777 ubi_assert(e->ec >= 0);
1778
1779 wl_tree_add(e, &ubi->free);
1780 ubi->free_count++;
1781
1782 ubi->lookuptbl[e->pnum] = e;
1783
1784 found_pebs++;
1785 }
1786
1787 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1788 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1789 cond_resched();
1790
1791 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1792 if (!e) {
1793 err = -ENOMEM;
1794 goto out_free;
1795 }
1796
1797 e->pnum = aeb->pnum;
1798 e->ec = aeb->ec;
1799 ubi->lookuptbl[e->pnum] = e;
1800
1801 if (!aeb->scrub) {
1802 dbg_wl("add PEB %d EC %d to the used tree",
1803 e->pnum, e->ec);
1804 wl_tree_add(e, &ubi->used);
1805 } else {
1806 dbg_wl("add PEB %d EC %d to the scrub tree",
1807 e->pnum, e->ec);
1808 wl_tree_add(e, &ubi->scrub);
1809 }
1810
1811 found_pebs++;
1812 }
1813 }
1814
1815 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1816 cond_resched();
1817
1818 e = ubi_find_fm_block(ubi, aeb->pnum);
1819
1820 if (e) {
1821 ubi_assert(!ubi->lookuptbl[e->pnum]);
1822 ubi->lookuptbl[e->pnum] = e;
1823 } else {
1824 bool sync = false;
1832 if (ubi->lookuptbl[aeb->pnum])
1833 continue;
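			/*
			 * Erase fastmap superblock PEBs synchronously so that
			 * a later attach cannot find an outdated fastmap
			 * anchor after a power cut.
			 */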
1844 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1845 sync = true;
1846
1847 err = erase_aeb(ubi, aeb, sync);
1848 if (err)
1849 goto out_free;
1850 }
1851
1852 found_pebs++;
1853 }
1854
1855 dbg_wl("found %i PEBs", found_pebs);
1856
1857 ubi_assert(ubi->good_peb_count == found_pebs);
1858
1859 reserved_pebs = WL_RESERVED_PEBS;
1860 ubi_fastmap_init(ubi, &reserved_pebs);
1861
1862 if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1864 ubi->avail_pebs, reserved_pebs);
1865 if (ubi->corr_peb_count)
1866 ubi_err(ubi, "%d PEBs are corrupted and not used",
1867 ubi->corr_peb_count);
1868 err = -ENOSPC;
1869 goto out_free;
1870 }
1871 ubi->avail_pebs -= reserved_pebs;
1872 ubi->rsvd_pebs += reserved_pebs;
1873
1874
1875 err = ensure_wear_leveling(ubi, 0);
1876 if (err)
1877 goto out_free;
1878
1879 return 0;
1880
1881out_free:
1882 shutdown_work(ubi);
1883 tree_destroy(ubi, &ubi->used);
1884 tree_destroy(ubi, &ubi->free);
1885 tree_destroy(ubi, &ubi->scrub);
1886 kfree(ubi->lookuptbl);
1887 return err;
1888}
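/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */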
1894static void protection_queue_destroy(struct ubi_device *ubi)
1895{
1896 int i;
1897 struct ubi_wl_entry *e, *tmp;
1898
1899 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1900 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1901 list_del(&e->u.list);
1902 wl_entry_destroy(ubi, e);
1903 }
1904 }
1905}
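/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */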
1911void ubi_wl_close(struct ubi_device *ubi)
1912{
1913 dbg_wl("close the WL sub-system");
1914 ubi_fastmap_close(ubi);
1915 shutdown_work(ubi);
1916 protection_queue_destroy(ubi);
1917 tree_destroy(ubi, &ubi->used);
1918 tree_destroy(ubi, &ubi->erroneous);
1919 tree_destroy(ubi, &ubi->free);
1920 tree_destroy(ubi, &ubi->scrub);
1921 kfree(ubi->lookuptbl);
1922}
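/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, and a non-zero value if not or if an error
 * occurred.
 */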
1934static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1935{
1936 int err;
1937 long long read_ec;
1938 struct ubi_ec_hdr *ec_hdr;
1939
1940 if (!ubi_dbg_chk_gen(ubi))
1941 return 0;
1942
1943 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1944 if (!ec_hdr)
1945 return -ENOMEM;
1946
1947 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1948 if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
1950 err = 0;
1951 goto out_free;
1952 }
1953
1954 read_ec = be64_to_cpu(ec_hdr->ec);
1955 if (ec != read_ec && read_ec - ec > 1) {
1956 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1957 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1958 dump_stack();
1959 err = 1;
1960 } else
1961 err = 0;
1962
1963out_free:
1964 kfree(ec_hdr);
1965 return err;
1966}
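/**
 * self_check_in_wl_tree - check that wear-leveling entry is in a WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */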
1977static int self_check_in_wl_tree(const struct ubi_device *ubi,
1978 struct ubi_wl_entry *e, struct rb_root *root)
1979{
1980 if (!ubi_dbg_chk_gen(ubi))
1981 return 0;
1982
1983 if (in_wl_tree(e, root))
1984 return 0;
1985
1986 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1987 e->pnum, e->ec, root);
1988 dump_stack();
1989 return -EINVAL;
1990}
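/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in the protection queue and %-EINVAL
 * if it is not.
 */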
2000static int self_check_in_pq(const struct ubi_device *ubi,
2001 struct ubi_wl_entry *e)
2002{
2003 if (!ubi_dbg_chk_gen(ubi))
2004 return 0;
2005
2006 if (in_pq(ubi, e))
2007 return 0;
2008
2009 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2010 e->pnum, e->ec);
2011 dump_stack();
2012 return -EINVAL;
2013}
2014#ifndef CONFIG_MTD_UBI_FASTMAP
2015static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2016{
2017 struct ubi_wl_entry *e;
2018
2019 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2020 self_check_in_wl_tree(ubi, e, &ubi->free);
2021 ubi->free_count--;
2022 ubi_assert(ubi->free_count >= 0);
2023 rb_erase(&e->u.rb, &ubi->free);
2024
2025 return e;
2026}
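/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */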
2037static int produce_free_peb(struct ubi_device *ubi)
2038{
2039 int err;
2040
2041 while (!ubi->free.rb_node && ubi->works_count) {
2042 spin_unlock(&ubi->wl_lock);
2043
2044 dbg_wl("do one work synchronously");
2045 err = do_work(ubi);
2046
2047 spin_lock(&ubi->wl_lock);
2048 if (err)
2049 return err;
2050 }
2051
2052 return 0;
2053}
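/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 */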
2063int ubi_wl_get_peb(struct ubi_device *ubi)
2064{
2065 int err;
2066 struct ubi_wl_entry *e;
2067
2068retry:
2069 down_read(&ubi->fm_eba_sem);
2070 spin_lock(&ubi->wl_lock);
2071 if (!ubi->free.rb_node) {
2072 if (ubi->works_count == 0) {
2073 ubi_err(ubi, "no free eraseblocks");
2074 ubi_assert(list_empty(&ubi->works));
2075 spin_unlock(&ubi->wl_lock);
2076 return -ENOSPC;
2077 }
2078
2079 err = produce_free_peb(ubi);
2080 if (err < 0) {
2081 spin_unlock(&ubi->wl_lock);
2082 return err;
2083 }
2084 spin_unlock(&ubi->wl_lock);
2085 up_read(&ubi->fm_eba_sem);
2086 goto retry;
2087
2088 }
2089 e = wl_get_wle(ubi);
2090 prot_queue_add(ubi, e);
2091 spin_unlock(&ubi->wl_lock);
2092
2093 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2094 ubi->peb_size - ubi->vid_hdr_aloffset);
2095 if (err) {
2096 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2097 return err;
2098 }
2099
2100 return e->pnum;
2101}
2102#else
2103#include "fastmap-wl.c"
2104#endif
2105