/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is
 * detected in a physical eraseblock, it has to be moved and scheduled for
 * erasure. This process is called "scrubbing".
 *
 * Free eraseblocks are kept in the @ubi->free RB-tree, while used eraseblocks
 * are kept in the @ubi->used, @ubi->erroneous, or @ubi->scrub RB-trees, or
 * (temporarily) in the @ubi->pq protection queue.
 *
 * When the WL sub-system returns a physical eraseblock, it is protected from
 * being moved for some "time". For this reason, the physical eraseblock is
 * not directly moved from the @ubi->free tree to the @ubi->used tree. There
 * is a protection queue in between where this physical eraseblock is
 * temporarily stored (@ubi->pq). The "time" is measured in erase cycles:
 * eraseblocks are put to the tail of the queue when they are returned, and
 * one queue slot is retired on each erase operation, so the queue length
 * defines for how many (global) erase cycles PEBs are protected.
 *
 * Note, in this implementation we keep a small in-RAM object for each
 * physical eraseblock. This is not a scalable solution, but it is good
 * enough for moderately large flashes, and it is simple.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with
 * high erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter, but in certain workloads this
 * could lead to an unlimited wear of one or few physical eraseblocks. So we
 * have a constant which limits the highest erase counter of the free physical
 * eraseblock to pick: the WL sub-system does not pick eraseblocks with erase
 * counter greater than the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
	ubi_update_fastmap(ubi);
}

/**
 * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
 * @ubi: UBI device description object
 * @pnum: the PEB to be checked
 */
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	int i;

	if (!ubi->fm)
		return 0;

	for (i = 0; i < ubi->fm->used_blocks; i++)
		if (ubi->fm->e[i]->pnum == pnum)
			return 1;

	return 0;
}
#else
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	return 0;
}
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure. Note, @ubi->wl_lock has to be held when calling this function;
 * it is temporarily released while the works are being executed.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater or equivalent than the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

#ifdef CONFIG_MTD_UBI_FASTMAP
		/* If no fastmap has been written and this WL entry can be
		 * used as anchor PEB, hold it back and return the second
		 * best WL entry such that fastmap can use the anchor PEB
		 * later. */
		if (e && !ubi->fm_disabled && !ubi->fm &&
		    e->pnum < UBI_FM_MAX_START)
			e = rb_entry(rb_next(root->rb_node),
				     struct ubi_wl_entry, u.rb);
#endif
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree where to look for
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem does no longer know this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
#endif

/**
 * __wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 */
static int __wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err("no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err("no free eraseblocks");
		return -ENOSPC;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
#ifndef CONFIG_MTD_UBI_FASTMAP
	/* We have to enqueue e only if fastmap is disabled; if fastmap is
	 * enabled, prot_queue_add() will be called by ubi_wl_get_peb()
	 * after removing e from the pool. */
	prot_queue_add(ubi, e);
#endif
	return e->pnum;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

/*
 * refill_wl_pool - refill the fastmap WL pool with free PEBs, after first
 * returning any unused PEBs to the free tree. A few PEBs are always left in
 * the free tree as a reserve for bad block handling.
 */
static void refill_wl_pool(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;

	return_unused_pool_pebs(ubi, pool);

	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
		if (!ubi->free.rb_node ||
		    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
			break;

		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		self_check_in_wl_tree(ubi, e, &ubi->free);
		rb_erase(&e->u.rb, &ubi->free);
		ubi->free_count--;

		pool->pebs[pool->size] = e->pnum;
	}
	pool->used = 0;
}

/*
 * refill_wl_user_pool - refill the fastmap pool which serves user requests,
 * after first returning any unused PEBs to the free tree.
 */
static void refill_wl_user_pool(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_pool;

	return_unused_pool_pebs(ubi, pool);

	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
		if (!ubi->free.rb_node ||
		    (ubi->free_count - ubi->beb_rsvd_pebs < 1))
			break;

		pool->pebs[pool->size] = __wl_get_peb(ubi);
		if (pool->pebs[pool->size] < 0)
			break;
	}
	pool->used = 0;
}
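
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */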
void ubi_refill_pools(struct ubi_device *ubi)
{
	spin_lock(&ubi->wl_lock);
	refill_wl_pool(ubi);
	refill_wl_user_pool(ubi);
	spin_unlock(&ubi->wl_lock);
}
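
/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
 * the fastmap pool.
 */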
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
	    wl_pool->used == wl_pool->size)
		ubi_update_fastmap(ubi);

	/* we were not able to get a single free PEB */
	if (!pool->size)
		ret = -ENOSPC;
	else {
		spin_lock(&ubi->wl_lock);
		ret = pool->pebs[pool->used++];
		prot_queue_add(ubi, ubi->lookuptbl[ret]);
		spin_unlock(&ubi->wl_lock);
	}

	return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size || !pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		schedule_work(&ubi->fm_work);
		return NULL;
	} else {
		pnum = pool->pebs[pool->used++];
		return ubi->lookuptbl[pnum];
	}
}
#else
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int peb, err;

	spin_lock(&ubi->wl_lock);
	peb = __wl_get_peb(ubi);
	spin_unlock(&ubi->wl_lock);

	if (peb < 0)
		return peb;

	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
		return err;
	}

	return peb;
}
#endif
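
/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */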
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
		       e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
#endif

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that @e belongs to
 * @lnum: the logical eraseblock number that @e belongs to
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);
	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that @e belongs to
 * @lnum: the logical eraseblock number that @e belongs to
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
}

#ifdef CONFIG_MTD_UBI_FASTMAP
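/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */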
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and writing now a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	} else {
		e->ec = fm_e->ec;
		kfree(fm_e);
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}
#endif
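
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */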
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_available(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move PEB without a VID header. UBI
			 * always writes VID headers shortly after the PEB was
			 * given, so we have a situation when it has not yet
			 * had a chance to write it, because it was preempted.
			 * So add this PEB to the protection queue so far,
			 * because presumably more data will be written to it
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, and the task which
		 * put it was not aware of the WL worker. The PEB is not
		 * needed anymore, so just schedule it for erasure as well.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reasons the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	/*
	 * Note, @lnum is valid only when @vol_id is known (it is only
	 * initialized after the VID header has been read), so only print the
	 * LEB coordinates in that case.
	 */
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
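
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set when this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */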
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
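/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */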
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}
#endif
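
/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care about marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */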
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, lets switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err("no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg("mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The amount of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn("no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
	else
		ubi_warn("last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}
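
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened to this @pnum and it has to be tested. This function returns
 * zero in case of success, and a negative error code in case of failure.
 */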
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}
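
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing which is done in background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */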
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
	    in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}
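
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as if all pending works were executed. This function returns zero in
 * case of success and a negative error code in case of failure.
 */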
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry(wrk, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}
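
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */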
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
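
/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */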
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);
#ifdef CONFIG_MTD_UBI_FASTMAP
	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
#endif

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}

		found_pebs++;
	}

	ubi->free_count = 0;
	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);
		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	dbg_wl("found %i PEBs", found_pebs);

	if (ubi->fm)
		ubi_assert(ubi->good_peb_count ==
			   found_pebs + ubi->fm->used_blocks);
	else
		ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Reserve enough LEBs to store two fastmaps. */
	reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
#endif

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}
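
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */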
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->erroneous);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}
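
/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, and a negative error code if not or if an
 * error occurred.
 */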
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err("self-check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection
 *                    queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in the protection queue and %-EINVAL
 * if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}