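/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks (PEBs) and erase counters and knows nothing about
 * logical eraseblocks, volumes, etc. Used PEBs with low erase counters are
 * periodically copied to free PEBs with high erase counters so that erasures
 * are spread evenly over the flash. The sub-system maintains the "used",
 * "free", "scrub" and "erroneous" RB-trees of ubi_wl_entry objects, keyed by
 * (erase counter, PEB number), a protection queue which shields recently
 * returned PEBs from being moved for a while, and a list of pending works
 * (erasure, wear-leveling, scrubbing) which is processed by the per-device
 * background thread, or synchronously when built for U-Boot.
 */
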
#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target PEB to move the data to. The simplest strategy would be to always
 * pick the free PEB with the highest erase counter, but that may quickly wear
 * out a small set of PEBs. Instead, any free PEB whose erase counter is
 * within %WL_FREE_MAX_DIFF of the lowest erase counter is considered a valid
 * target.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);
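
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */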
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
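
/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear-leveling entry and removes the reference
 * from the lookup table.
 */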
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}
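
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */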
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes it in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure after
	 * this call as it will have been freed or reused by that time by the
	 * worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
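
/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */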
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}
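
/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be held.
 */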
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
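
/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter in the tree.
 */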
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/*
	 * If no fastmap has been written and this WL entry can be used as
	 * anchor PEB, hold it back and return the second best WL entry such
	 * that fastmap can use the anchor PEB later.
	 */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}
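
/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear-leveling entry with medium erase counter,
 * but not greater or equivalent than the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */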
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/*
		 * If no fastmap has been written and this WL entry can be
		 * used as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later.
		 */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}
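
/**
 * wl_get_wle - get a mean wear-leveling entry from the free tree.
 * @ubi: UBI device description object
 *
 * This function returns a wear-leveling entry in case of success and
 * %NULL in case of failure.
 */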
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * The entry is no longer free, so take it out of the free tree. The
	 * caller will either use it right away or put it under protection.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}
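
/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */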
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}
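
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function erases @e, writes a fresh EC header with the incremented
 * erase counter, and returns zero in case of success or a negative error code
 * in case of failure.
 */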
432static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
433 int torture)
434{
435 int err;
436 struct ubi_ec_hdr *ec_hdr;
437 unsigned long long ec = e->ec;
438
439 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
440
441 err = self_check_ec(ubi, e->pnum, e->ec);
442 if (err)
443 return -EINVAL;
444
445 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
446 if (!ec_hdr)
447 return -ENOMEM;
448
449 err = ubi_io_sync_erase(ubi, e->pnum, torture);
450 if (err < 0)
451 goto out_free;
452
453 ec += err;
454 if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
459 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
460 e->pnum, ec);
461 err = -EINVAL;
462 goto out_free;
463 }
464
465 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
466
467 ec_hdr->ec = cpu_to_be64(ec);
468
469 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
470 if (err)
471 goto out_free;
472
473 e->ec = ec;
474 spin_lock(&ubi->wl_lock);
475 if (e->ec > ubi->max_ec)
476 ubi->max_ec = e->ec;
477 spin_unlock(&ubi->wl_lock);
478
479out_free:
480 kfree(ec_hdr);
481 return err;
482}
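
/**
 * serve_prot_queue - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and are moved to the @ubi->used tree.
 */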
492static void serve_prot_queue(struct ubi_device *ubi)
493{
494 struct ubi_wl_entry *e, *tmp;
495 int count;
496
	/*
	 * There may be several protected physical eraseblocks to move;
	 * process them all.
	 */
501repeat:
502 count = 0;
503 spin_lock(&ubi->wl_lock);
504 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
505 dbg_wl("PEB %d EC %d protection over, move to used tree",
506 e->pnum, e->ec);
507
508 list_del(&e->u.list);
509 wl_tree_add(e, &ubi->used);
510 if (count++ > 32) {
511
512
513
514
515 spin_unlock(&ubi->wl_lock);
516 cond_resched();
517 goto repeat;
518 }
519 }
520
521 ubi->pq_head += 1;
522 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
523 ubi->pq_head = 0;
524 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
525 spin_unlock(&ubi->wl_lock);
526}
527
528#ifdef __UBOOT__
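/**
 * ubi_do_worker - process all pending works (U-Boot only).
 * @ubi: UBI device description object
 *
 * When built for U-Boot there is no running background thread, so pending
 * works are processed synchronously by this helper until the list is empty.
 */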
529void ubi_do_worker(struct ubi_device *ubi)
530{
531 int err;
532
533 if (list_empty(&ubi->works) || ubi->ro_mode ||
534 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
535 return;
536
537 spin_lock(&ubi->wl_lock);
538 while (!list_empty(&ubi->works)) {
539
540
541
542
543 spin_unlock(&ubi->wl_lock);
544 err = do_work(ubi);
545 spin_lock(&ubi->wl_lock);
546 if (err) {
547 ubi_err(ubi, "%s: work failed with error code %d",
548 ubi->bgt_name, err);
549 }
550 }
551 spin_unlock(&ubi->wl_lock);
552}
553#endif
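
/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */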
563static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
564{
565 spin_lock(&ubi->wl_lock);
566 list_add_tail(&wrk->list, &ubi->works);
567 ubi_assert(ubi->works_count >= 0);
568 ubi->works_count += 1;
569#ifndef __UBOOT__
570 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
571 wake_up_process(ubi->bgt_thread);
572#endif
573 spin_unlock(&ubi->wl_lock);
574}
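
/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */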
584static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
585{
586 down_read(&ubi->work_sem);
587 __schedule_ubi_work(ubi, wrk);
588 up_read(&ubi->work_sem);
589}
590
591static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
592 int shutdown);
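
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */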
605static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
606 int vol_id, int lnum, int torture)
607{
608 struct ubi_work *wl_wrk;
609
610 ubi_assert(e);
611
612 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
613 e->pnum, e->ec, torture);
614
615 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
616 if (!wl_wrk)
617 return -ENOMEM;
618
619 wl_wrk->func = &erase_worker;
620 wl_wrk->e = e;
621 wl_wrk->vol_id = vol_id;
622 wl_wrk->lnum = lnum;
623 wl_wrk->torture = torture;
624
625 schedule_ubi_work(ubi, wl_wrk);
626
627#ifdef __UBOOT__
628 ubi_do_worker(ubi);
629#endif
630 return 0;
631}
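
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */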
642static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
643 int vol_id, int lnum, int torture)
644{
645 struct ubi_work *wl_wrk;
646
647 dbg_wl("sync erase of PEB %i", e->pnum);
648
649 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
650 if (!wl_wrk)
651 return -ENOMEM;
652
653 wl_wrk->e = e;
654 wl_wrk->vol_id = vol_id;
655 wl_wrk->lnum = lnum;
656 wl_wrk->torture = torture;
657
658 return erase_worker(ubi, wl_wrk, 0);
659}
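
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL sub-system is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */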
672static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
673 int shutdown)
674{
675 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
676 int vol_id = -1, lnum = -1;
677#ifdef CONFIG_MTD_UBI_FASTMAP
678 int anchor = wrk->anchor;
679#endif
680 struct ubi_wl_entry *e1, *e2;
681 struct ubi_vid_hdr *vid_hdr;
682
683 kfree(wrk);
684 if (shutdown)
685 return 0;
686
687 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
688 if (!vid_hdr)
689 return -ENOMEM;
690
691 mutex_lock(&ubi->move_mutex);
692 spin_lock(&ubi->wl_lock);
693 ubi_assert(!ubi->move_from && !ubi->move_to);
694 ubi_assert(!ubi->move_to_put);
695
696 if (!ubi->free.rb_node ||
697 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel the movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
708 dbg_wl("cancel WL, a list is empty: free %d, used %d",
709 !ubi->free.rb_node, !ubi->used.rb_node);
710 goto out_cancel;
711 }
712
713#ifdef CONFIG_MTD_UBI_FASTMAP
714
715 if (!anchor)
716 anchor = !anchor_pebs_avalible(&ubi->free);
717
718 if (anchor) {
719 e1 = find_anchor_wl_entry(&ubi->used);
720 if (!e1)
721 goto out_cancel;
722 e2 = get_peb_for_wl(ubi);
723 if (!e2)
724 goto out_cancel;
725
726 self_check_in_wl_tree(ubi, e1, &ubi->used);
727 rb_erase(&e1->u.rb, &ubi->used);
728 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
729 } else if (!ubi->scrub.rb_node) {
730#else
731 if (!ubi->scrub.rb_node) {
732#endif
733
734
735
736
737
738 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
739 e2 = get_peb_for_wl(ubi);
740 if (!e2)
741 goto out_cancel;
742
743 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
744 dbg_wl("no WL needed: min used EC %d, max free EC %d",
745 e1->ec, e2->ec);
746
747
748 wl_tree_add(e2, &ubi->free);
749 ubi->free_count++;
750 goto out_cancel;
751 }
752 self_check_in_wl_tree(ubi, e1, &ubi->used);
753 rb_erase(&e1->u.rb, &ubi->used);
754 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
755 e1->pnum, e1->ec, e2->pnum, e2->ec);
756 } else {
757
758 scrubbing = 1;
759 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
760 e2 = get_peb_for_wl(ubi);
761 if (!e2)
762 goto out_cancel;
763
764 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
765 rb_erase(&e1->u.rb, &ubi->scrub);
766 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
767 }
768
769 ubi->move_from = e1;
770 ubi->move_to = e2;
771 spin_unlock(&ubi->wl_lock);
772
	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to
	 * @e2->pnum. We do not yet know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to, so we have to read the volume
	 * identifier header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased:
	 * 'ubi_wl_put_peb()' would wait for the move to finish if the PEB
	 * being moved was unmapped.
	 */
784 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
785 if (err && err != UBI_IO_BITFLIPS) {
786 if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB is given out, so the writer simply has not had a
			 * chance to write it yet. Put this PEB to the
			 * protection queue for now; presumably more data
			 * (including the missing VID header) will be written
			 * to it, and then we will move it.
			 */
797 dbg_wl("PEB %d has no VID header", e1->pnum);
798 protect = 1;
799 goto out_not_moved;
800 } else if (err == UBI_IO_FF_BITFLIPS) {
801
802
803
804
805
806 dbg_wl("PEB %d has no VID header but has bit-flips",
807 e1->pnum);
808 scrubbing = 1;
809 goto out_not_moved;
810 }
811
812 ubi_err(ubi, "error %d while reading VID header from PEB %d",
813 err, e1->pnum);
814 goto out_error;
815 }
816
817 vol_id = be32_to_cpu(vid_hdr->vol_id);
818 lnum = be32_to_cpu(vid_hdr->lnum);
819
820 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
821 if (err) {
822 if (err == MOVE_CANCEL_RACE) {
823
824
825
826
827
828
829
830 protect = 1;
831 goto out_not_moved;
832 }
833 if (err == MOVE_RETRY) {
834 scrubbing = 1;
835 goto out_not_moved;
836 }
837 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
838 err == MOVE_TARGET_RD_ERR) {
839
840
841
842 torture = 1;
843 goto out_not_moved;
844 }
845
846 if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping the corresponding LEB. Instead,
			 * just put this PEB to the @ubi->erroneous list to
			 * prevent UBI from trying to move it over and over
			 * again.
			 */
855 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
856 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
857 ubi->erroneous_peb_count);
858 goto out_error;
859 }
860 erroneous = 1;
861 goto out_not_moved;
862 }
863
864 if (err < 0)
865 goto out_error;
866
867 ubi_assert(0);
868 }
869
870
871 if (scrubbing)
872 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
873 e1->pnum, vol_id, lnum, e2->pnum);
874 ubi_free_vid_hdr(ubi, vid_hdr);
875
876 spin_lock(&ubi->wl_lock);
877 if (!ubi->move_to_put) {
878 wl_tree_add(e2, &ubi->used);
879 e2 = NULL;
880 }
881 ubi->move_from = ubi->move_to = NULL;
882 ubi->move_to_put = ubi->wl_scheduled = 0;
883 spin_unlock(&ubi->wl_lock);
884
885 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
886 if (err) {
887 if (e2)
888 wl_entry_destroy(ubi, e2);
889 goto out_ro;
890 }
891
892 if (e2) {
893
894
895
896
897 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
898 e2->pnum, vol_id, lnum);
899 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
900 if (err)
901 goto out_ro;
902 }
903
904 dbg_wl("done");
905 mutex_unlock(&ubi->move_mutex);
906 return 0;
907
	/*
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, so schedule it for erasure.
	 */
913out_not_moved:
914 if (vol_id != -1)
915 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
916 e1->pnum, vol_id, lnum, e2->pnum, err);
917 else
918 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
919 e1->pnum, e2->pnum, err);
920 spin_lock(&ubi->wl_lock);
921 if (protect)
922 prot_queue_add(ubi, e1);
923 else if (erroneous) {
924 wl_tree_add(e1, &ubi->erroneous);
925 ubi->erroneous_peb_count += 1;
926 } else if (scrubbing)
927 wl_tree_add(e1, &ubi->scrub);
928 else
929 wl_tree_add(e1, &ubi->used);
930 ubi_assert(!ubi->move_to_put);
931 ubi->move_from = ubi->move_to = NULL;
932 ubi->wl_scheduled = 0;
933 spin_unlock(&ubi->wl_lock);
934
935 ubi_free_vid_hdr(ubi, vid_hdr);
936 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
937 if (err)
938 goto out_ro;
939
940 mutex_unlock(&ubi->move_mutex);
941 return 0;
942
943out_error:
944 if (vol_id != -1)
945 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
946 err, e1->pnum, e2->pnum);
947 else
948 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
949 err, e1->pnum, vol_id, lnum, e2->pnum);
950 spin_lock(&ubi->wl_lock);
951 ubi->move_from = ubi->move_to = NULL;
952 ubi->move_to_put = ubi->wl_scheduled = 0;
953 spin_unlock(&ubi->wl_lock);
954
955 ubi_free_vid_hdr(ubi, vid_hdr);
956 wl_entry_destroy(ubi, e1);
957 wl_entry_destroy(ubi, e2);
958
959out_ro:
960 ubi_ro_mode(ubi);
961 mutex_unlock(&ubi->move_mutex);
962 ubi_assert(err != 0);
963 return err < 0 ? err : -EIO;
964
965out_cancel:
966 ubi->wl_scheduled = 0;
967 spin_unlock(&ubi->wl_lock);
968 mutex_unlock(&ubi->move_mutex);
969 ubi_free_vid_hdr(ubi, vid_hdr);
970 return 0;
971}
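
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set when this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. It returns zero in case of success and a negative error code in
 * case of failure.
 */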
982static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
983{
984 int err = 0;
985 struct ubi_wl_entry *e1;
986 struct ubi_wl_entry *e2;
987 struct ubi_work *wrk;
988
989 spin_lock(&ubi->wl_lock);
990 if (ubi->wl_scheduled)
991
992 goto out_unlock;
993
	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
998 if (!ubi->scrub.rb_node) {
999 if (!ubi->used.rb_node || !ubi->free.rb_node)
1000
1001 goto out_unlock;
1002
		/*
		 * Schedule wear-leveling only if the difference between the
		 * lowest erase counter of the used physical eraseblocks and a
		 * high erase counter of the free physical eraseblocks is
		 * greater than %UBI_WL_THRESHOLD.
		 */
1009 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1010 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1011
1012 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1013 goto out_unlock;
1014 dbg_wl("schedule wear-leveling");
1015 } else
1016 dbg_wl("schedule scrubbing");
1017
1018 ubi->wl_scheduled = 1;
1019 spin_unlock(&ubi->wl_lock);
1020
1021 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1022 if (!wrk) {
1023 err = -ENOMEM;
1024 goto out_cancel;
1025 }
1026
1027 wrk->anchor = 0;
1028 wrk->func = &wear_leveling_worker;
1029 if (nested)
1030 __schedule_ubi_work(ubi, wrk);
1031#ifndef __UBOOT__
1032 else
1033 schedule_ubi_work(ubi, wrk);
1034#else
1035 else {
1036 schedule_ubi_work(ubi, wrk);
1037 ubi_do_worker(ubi);
1038 }
1039#endif
1040 return err;
1041
1042out_cancel:
1043 spin_lock(&ubi->wl_lock);
1044 ubi->wl_scheduled = 0;
1045out_unlock:
1046 spin_unlock(&ubi->wl_lock);
1047 return err;
1048}
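
/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL sub-system is shutting down
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */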
1062static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1063 int shutdown)
1064{
1065 struct ubi_wl_entry *e = wl_wrk->e;
1066 int pnum = e->pnum;
1067 int vol_id = wl_wrk->vol_id;
1068 int lnum = wl_wrk->lnum;
1069 int err, available_consumed = 0;
1070
1071 if (shutdown) {
1072 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1073 kfree(wl_wrk);
1074 wl_entry_destroy(ubi, e);
1075 return 0;
1076 }
1077
1078 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1079 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1080
1081 err = sync_erase(ubi, e, wl_wrk->torture);
1082 if (!err) {
1083
1084 kfree(wl_wrk);
1085
1086 spin_lock(&ubi->wl_lock);
1087 wl_tree_add(e, &ubi->free);
1088 ubi->free_count++;
1089 spin_unlock(&ubi->wl_lock);
1090
		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
1095 serve_prot_queue(ubi);
1096
1097
1098 err = ensure_wear_leveling(ubi, 1);
1099 return err;
1100 }
1101
1102 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1103 kfree(wl_wrk);
1104
1105 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1106 err == -EBUSY) {
1107 int err1;
1108
1109
1110 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1111 if (err1) {
1112 err = err1;
1113 goto out_ro;
1114 }
1115 return err;
1116 }
1117
1118 wl_entry_destroy(ubi, e);
1119 if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again, so switch to R/O mode.
		 */
1125 goto out_ro;
1126
1127
1128
1129 if (!ubi->bad_allowed) {
1130 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1131 goto out_ro;
1132 }
1133
1134 spin_lock(&ubi->volumes_lock);
1135 if (ubi->beb_rsvd_pebs == 0) {
1136 if (ubi->avail_pebs == 0) {
1137 spin_unlock(&ubi->volumes_lock);
1138 ubi_err(ubi, "no reserved/available physical eraseblocks");
1139 goto out_ro;
1140 }
1141 ubi->avail_pebs -= 1;
1142 available_consumed = 1;
1143 }
1144 spin_unlock(&ubi->volumes_lock);
1145
1146 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1147 err = ubi_io_mark_bad(ubi, pnum);
1148 if (err)
1149 goto out_ro;
1150
1151 spin_lock(&ubi->volumes_lock);
1152 if (ubi->beb_rsvd_pebs > 0) {
1153 if (available_consumed) {
1154
1155
1156
1157
1158 ubi->avail_pebs += 1;
1159 available_consumed = 0;
1160 }
1161 ubi->beb_rsvd_pebs -= 1;
1162 }
1163 ubi->bad_peb_count += 1;
1164 ubi->good_peb_count -= 1;
1165 ubi_calculate_reserved(ubi);
1166 if (available_consumed)
1167 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1168 else if (ubi->beb_rsvd_pebs)
1169 ubi_msg(ubi, "%d PEBs left in the reserve",
1170 ubi->beb_rsvd_pebs);
1171 else
1172 ubi_warn(ubi, "last PEB from the reserve was used");
1173 spin_unlock(&ubi->volumes_lock);
1174
1175 return err;
1176
1177out_ro:
1178 if (available_consumed) {
1179 spin_lock(&ubi->volumes_lock);
1180 ubi->avail_pebs += 1;
1181 spin_unlock(&ubi->volumes_lock);
1182 }
1183 ubi_ro_mode(ubi);
1184 return err;
1185}
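
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened to this physical eraseblock and it has to be tested. This
 * function returns zero in case of success, and a negative error code in
 * case of failure.
 */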
1200int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1201 int pnum, int torture)
1202{
1203 int err;
1204 struct ubi_wl_entry *e;
1205
1206 dbg_wl("PEB %d", pnum);
1207 ubi_assert(pnum >= 0);
1208 ubi_assert(pnum < ubi->peb_count);
1209
1210 down_read(&ubi->fm_protect);
1211
1212retry:
1213 spin_lock(&ubi->wl_lock);
1214 e = ubi->lookuptbl[pnum];
1215 if (e == ubi->move_from) {
		/*
		 * The user is putting the physical eraseblock which was
		 * selected to be moved. It will be scheduled for erasure in
		 * the wear-leveling worker.
		 */
1221 dbg_wl("PEB %d is being moved, wait", pnum);
1222 spin_unlock(&ubi->wl_lock);
1223
1224
1225 mutex_lock(&ubi->move_mutex);
1226 mutex_unlock(&ubi->move_mutex);
1227 goto retry;
1228 } else if (e == ubi->move_to) {
		/*
		 * The user is putting the physical eraseblock which was
		 * selected as the target the data is moved to. This can
		 * happen if the EBA sub-system has already re-mapped the LEB
		 * in 'ubi_eba_copy_leb()' but the WL sub-system has not yet
		 * put the PEB to the "used" tree. So we just set a flag which
		 * tells the WL worker that the PEB is not needed anymore and
		 * should be scheduled for erasure.
		 */
1238 dbg_wl("PEB %d is the target of data moving", pnum);
1239 ubi_assert(!ubi->move_to_put);
1240 ubi->move_to_put = 1;
1241 spin_unlock(&ubi->wl_lock);
1242 up_read(&ubi->fm_protect);
1243 return 0;
1244 } else {
1245 if (in_wl_tree(e, &ubi->used)) {
1246 self_check_in_wl_tree(ubi, e, &ubi->used);
1247 rb_erase(&e->u.rb, &ubi->used);
1248 } else if (in_wl_tree(e, &ubi->scrub)) {
1249 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1250 rb_erase(&e->u.rb, &ubi->scrub);
1251 } else if (in_wl_tree(e, &ubi->erroneous)) {
1252 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1253 rb_erase(&e->u.rb, &ubi->erroneous);
1254 ubi->erroneous_peb_count -= 1;
1255 ubi_assert(ubi->erroneous_peb_count >= 0);
1256
1257 torture = 1;
1258 } else {
1259 err = prot_queue_del(ubi, e->pnum);
1260 if (err) {
1261 ubi_err(ubi, "PEB %d not found", pnum);
1262 ubi_ro_mode(ubi);
1263 spin_unlock(&ubi->wl_lock);
1264 up_read(&ubi->fm_protect);
1265 return err;
1266 }
1267 }
1268 }
1269 spin_unlock(&ubi->wl_lock);
1270
1271 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1272 if (err) {
1273 spin_lock(&ubi->wl_lock);
1274 wl_tree_add(e, &ubi->used);
1275 spin_unlock(&ubi->wl_lock);
1276 }
1277
1278 up_read(&ubi->fm_protect);
1279 return err;
1280}
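
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules the PEB for scrubbing,
 * which is done in the background. It returns zero in case of success and a
 * negative error code in case of failure.
 */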
1292int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1293{
1294 struct ubi_wl_entry *e;
1295
1296 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1297
1298retry:
1299 spin_lock(&ubi->wl_lock);
1300 e = ubi->lookuptbl[pnum];
1301 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1302 in_wl_tree(e, &ubi->erroneous)) {
1303 spin_unlock(&ubi->wl_lock);
1304 return 0;
1305 }
1306
1307 if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
1314 spin_unlock(&ubi->wl_lock);
1315 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1316 yield();
1317 goto retry;
1318 }
1319
1320 if (in_wl_tree(e, &ubi->used)) {
1321 self_check_in_wl_tree(ubi, e, &ubi->used);
1322 rb_erase(&e->u.rb, &ubi->used);
1323 } else {
1324 int err;
1325
1326 err = prot_queue_del(ubi, e->pnum);
1327 if (err) {
1328 ubi_err(ubi, "PEB %d not found", pnum);
1329 ubi_ro_mode(ubi);
1330 spin_unlock(&ubi->wl_lock);
1331 return err;
1332 }
1333 }
1334
1335 wl_tree_add(e, &ubi->scrub);
1336 spin_unlock(&ubi->wl_lock);
1337
1338
1339
1340
1341
1342 return ensure_wear_leveling(ubi, 0);
1343}
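
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, it acts
 * as a wildcard for all of the corresponding volume or logical eraseblock
 * numbers. It returns zero in case of success and a negative error code in
 * case of failure.
 */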
1357int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1358{
1359 int err = 0;
1360 int found = 1;
1361
	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
1366 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1367 vol_id, lnum, ubi->works_count);
1368
1369 while (found) {
1370 struct ubi_work *wrk, *tmp;
1371 found = 0;
1372
1373 down_read(&ubi->work_sem);
1374 spin_lock(&ubi->wl_lock);
1375 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1376 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1377 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1378 list_del(&wrk->list);
1379 ubi->works_count -= 1;
1380 ubi_assert(ubi->works_count >= 0);
1381 spin_unlock(&ubi->wl_lock);
1382
1383 err = wrk->func(ubi, wrk, 0);
1384 if (err) {
1385 up_read(&ubi->work_sem);
1386 return err;
1387 }
1388
1389 spin_lock(&ubi->wl_lock);
1390 found = 1;
1391 break;
1392 }
1393 }
1394 spin_unlock(&ubi->wl_lock);
1395 up_read(&ubi->work_sem);
1396 }
1397
	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
1402 down_write(&ubi->work_sem);
1403 up_write(&ubi->work_sem);
1404
1405 return err;
1406}
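
/**
 * tree_destroy - destroy an RB-tree of wear-leveling entries.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */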
1413static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1414{
1415 struct rb_node *rb;
1416 struct ubi_wl_entry *e;
1417
1418 rb = root->rb_node;
1419 while (rb) {
1420 if (rb->rb_left)
1421 rb = rb->rb_left;
1422 else if (rb->rb_right)
1423 rb = rb->rb_right;
1424 else {
1425 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1426
1427 rb = rb_parent(rb);
1428 if (rb) {
1429 if (rb->rb_left == &e->u.rb)
1430 rb->rb_left = NULL;
1431 else
1432 rb->rb_right = NULL;
1433 }
1434
1435 wl_entry_destroy(ubi, e);
1436 }
1437 }
1438}
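
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */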
1444int ubi_thread(void *u)
1445{
1446 int failures = 0;
1447 struct ubi_device *ubi = u;
1448
1449 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1450 ubi->bgt_name, task_pid_nr(current));
1451
1452 set_freezable();
1453 for (;;) {
1454 int err;
1455
1456 if (kthread_should_stop())
1457 break;
1458
1459 if (try_to_freeze())
1460 continue;
1461
1462 spin_lock(&ubi->wl_lock);
1463 if (list_empty(&ubi->works) || ubi->ro_mode ||
1464 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1465 set_current_state(TASK_INTERRUPTIBLE);
1466 spin_unlock(&ubi->wl_lock);
1467 schedule();
1468 continue;
1469 }
1470 spin_unlock(&ubi->wl_lock);
1471
1472 err = do_work(ubi);
1473 if (err) {
1474 ubi_err(ubi, "%s: work failed with error code %d",
1475 ubi->bgt_name, err);
1476 if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
1481 ubi_msg(ubi, "%s: %d consecutive failures",
1482 ubi->bgt_name, WL_MAX_FAILURES);
1483 ubi_ro_mode(ubi);
1484 ubi->thread_enabled = 0;
1485 continue;
1486 }
1487 } else
1488 failures = 0;
1489
1490 cond_resched();
1491 }
1492
1493 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1494 return 0;
1495}
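
/**
 * shutdown_work - shut down all pending works.
 * @ubi: UBI device description object
 */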
1501static void shutdown_work(struct ubi_device *ubi)
1502{
1503#ifdef CONFIG_MTD_UBI_FASTMAP
1504#ifndef __UBOOT__
1505 flush_work(&ubi->fm_work);
1506#else
1507
1508#endif
1509#endif
1510 while (!list_empty(&ubi->works)) {
1511 struct ubi_work *wrk;
1512
1513 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1514 list_del(&wrk->list);
1515 wrk->func(ubi, wrk, 1);
1516 ubi->works_count -= 1;
1517 ubi_assert(ubi->works_count >= 0);
1518 }
1519}
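
/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */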
1529int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1530{
1531 int err, i, reserved_pebs, found_pebs = 0;
1532 struct rb_node *rb1, *rb2;
1533 struct ubi_ainf_volume *av;
1534 struct ubi_ainf_peb *aeb, *tmp;
1535 struct ubi_wl_entry *e;
1536
1537 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1538 spin_lock_init(&ubi->wl_lock);
1539 mutex_init(&ubi->move_mutex);
1540 init_rwsem(&ubi->work_sem);
1541 ubi->max_ec = ai->max_ec;
1542 INIT_LIST_HEAD(&ubi->works);
1543
1544 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1545
1546 err = -ENOMEM;
1547 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1548 if (!ubi->lookuptbl)
1549 return err;
1550
1551 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1552 INIT_LIST_HEAD(&ubi->pq[i]);
1553 ubi->pq_head = 0;
1554
1555 ubi->free_count = 0;
1556 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1557 cond_resched();
1558
1559 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1560 if (!e)
1561 goto out_free;
1562
1563 e->pnum = aeb->pnum;
1564 e->ec = aeb->ec;
1565 ubi->lookuptbl[e->pnum] = e;
1566 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1567 wl_entry_destroy(ubi, e);
1568 goto out_free;
1569 }
1570
1571 found_pebs++;
1572 }
1573
1574 list_for_each_entry(aeb, &ai->free, u.list) {
1575 cond_resched();
1576
1577 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1578 if (!e)
1579 goto out_free;
1580
1581 e->pnum = aeb->pnum;
1582 e->ec = aeb->ec;
1583 ubi_assert(e->ec >= 0);
1584
1585 wl_tree_add(e, &ubi->free);
1586 ubi->free_count++;
1587
1588 ubi->lookuptbl[e->pnum] = e;
1589
1590 found_pebs++;
1591 }
1592
1593 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1594 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1595 cond_resched();
1596
1597 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1598 if (!e)
1599 goto out_free;
1600
1601 e->pnum = aeb->pnum;
1602 e->ec = aeb->ec;
1603 ubi->lookuptbl[e->pnum] = e;
1604
1605 if (!aeb->scrub) {
1606 dbg_wl("add PEB %d EC %d to the used tree",
1607 e->pnum, e->ec);
1608 wl_tree_add(e, &ubi->used);
1609 } else {
1610 dbg_wl("add PEB %d EC %d to the scrub tree",
1611 e->pnum, e->ec);
1612 wl_tree_add(e, &ubi->scrub);
1613 }
1614
1615 found_pebs++;
1616 }
1617 }
1618
1619 dbg_wl("found %i PEBs", found_pebs);
1620
1621 if (ubi->fm) {
1622 ubi_assert(ubi->good_peb_count ==
1623 found_pebs + ubi->fm->used_blocks);
1624
1625 for (i = 0; i < ubi->fm->used_blocks; i++) {
1626 e = ubi->fm->e[i];
1627 ubi->lookuptbl[e->pnum] = e;
1628 }
1629 }
1630 else
1631 ubi_assert(ubi->good_peb_count == found_pebs);
1632
1633 reserved_pebs = WL_RESERVED_PEBS;
1634 ubi_fastmap_init(ubi, &reserved_pebs);
1635
1636 if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1638 ubi->avail_pebs, reserved_pebs);
1639 if (ubi->corr_peb_count)
1640 ubi_err(ubi, "%d PEBs are corrupted and not used",
1641 ubi->corr_peb_count);
1642 goto out_free;
1643 }
1644 ubi->avail_pebs -= reserved_pebs;
1645 ubi->rsvd_pebs += reserved_pebs;
1646
1647
1648 err = ensure_wear_leveling(ubi, 0);
1649 if (err)
1650 goto out_free;
1651
1652 return 0;
1653
1654out_free:
1655 shutdown_work(ubi);
1656 tree_destroy(ubi, &ubi->used);
1657 tree_destroy(ubi, &ubi->free);
1658 tree_destroy(ubi, &ubi->scrub);
1659 kfree(ubi->lookuptbl);
1660 return err;
1661}
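
/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */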
1667static void protection_queue_destroy(struct ubi_device *ubi)
1668{
1669 int i;
1670 struct ubi_wl_entry *e, *tmp;
1671
1672 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1673 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1674 list_del(&e->u.list);
1675 wl_entry_destroy(ubi, e);
1676 }
1677 }
1678}
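
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */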
1684void ubi_wl_close(struct ubi_device *ubi)
1685{
1686 dbg_wl("close the WL sub-system");
1687 ubi_fastmap_close(ubi);
1688 shutdown_work(ubi);
1689 protection_queue_destroy(ubi);
1690 tree_destroy(ubi, &ubi->used);
1691 tree_destroy(ubi, &ubi->erroneous);
1692 tree_destroy(ubi, &ubi->free);
1693 tree_destroy(ubi, &ubi->scrub);
1694 kfree(ubi->lookuptbl);
1695}
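
/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, and a negative error code if not or if an error
 * occurred.
 */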
1707static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1708{
1709 int err;
1710 long long read_ec;
1711 struct ubi_ec_hdr *ec_hdr;
1712
1713 if (!ubi_dbg_chk_gen(ubi))
1714 return 0;
1715
1716 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1717 if (!ec_hdr)
1718 return -ENOMEM;
1719
1720 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1721 if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
1723 err = 0;
1724 goto out_free;
1725 }
1726
1727 read_ec = be64_to_cpu(ec_hdr->ec);
1728 if (ec != read_ec && read_ec - ec > 1) {
1729 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1730 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1731 dump_stack();
1732 err = 1;
1733 } else
1734 err = 0;
1735
1736out_free:
1737 kfree(ec_hdr);
1738 return err;
1739}
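
/**
 * self_check_in_wl_tree - check that wear-leveling entry is in a WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */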
1750static int self_check_in_wl_tree(const struct ubi_device *ubi,
1751 struct ubi_wl_entry *e, struct rb_root *root)
1752{
1753 if (!ubi_dbg_chk_gen(ubi))
1754 return 0;
1755
1756 if (in_wl_tree(e, root))
1757 return 0;
1758
1759 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1760 e->pnum, e->ec, root);
1761 dump_stack();
1762 return -EINVAL;
1763}
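
/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in the protection queue and %-EINVAL
 * if it is not.
 */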
1773static int self_check_in_pq(const struct ubi_device *ubi,
1774 struct ubi_wl_entry *e)
1775{
1776 struct ubi_wl_entry *p;
1777 int i;
1778
1779 if (!ubi_dbg_chk_gen(ubi))
1780 return 0;
1781
1782 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1783 list_for_each_entry(p, &ubi->pq[i], u.list)
1784 if (p == e)
1785 return 0;
1786
1787 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1788 e->pnum, e->ec);
1789 dump_stack();
1790 return -EINVAL;
1791}
1792#ifndef CONFIG_MTD_UBI_FASTMAP
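/**
 * get_peb_for_wl - get a free PEB to be used as the wear-leveling target.
 * @ubi: UBI device description object
 *
 * A highly worn out free PEB (within %WL_FREE_MAX_DIFF of the lowest erase
 * counter) is removed from the free tree and returned.
 */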
1793static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
1794{
1795 struct ubi_wl_entry *e;
1796
1797 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1798 self_check_in_wl_tree(ubi, e, &ubi->free);
1799 ubi->free_count--;
1800 ubi_assert(ubi->free_count >= 0);
1801 rb_erase(&e->u.rb, &ubi->free);
1802
1803 return e;
1804}
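
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */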
1815static int produce_free_peb(struct ubi_device *ubi)
1816{
1817 int err;
1818
1819 while (!ubi->free.rb_node && ubi->works_count) {
1820 spin_unlock(&ubi->wl_lock);
1821
1822 dbg_wl("do one work synchronously");
1823 err = do_work(ubi);
1824
1825 spin_lock(&ubi->wl_lock);
1826 if (err)
1827 return err;
1828 }
1829
1830 return 0;
1831}
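
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */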
1841int ubi_wl_get_peb(struct ubi_device *ubi)
1842{
1843 int err;
1844 struct ubi_wl_entry *e;
1845
1846retry:
1847 down_read(&ubi->fm_eba_sem);
1848 spin_lock(&ubi->wl_lock);
1849 if (!ubi->free.rb_node) {
1850 if (ubi->works_count == 0) {
1851 ubi_err(ubi, "no free eraseblocks");
1852 ubi_assert(list_empty(&ubi->works));
1853 spin_unlock(&ubi->wl_lock);
1854 return -ENOSPC;
1855 }
1856
1857 err = produce_free_peb(ubi);
1858 if (err < 0) {
1859 spin_unlock(&ubi->wl_lock);
1860 return err;
1861 }
1862 spin_unlock(&ubi->wl_lock);
1863 up_read(&ubi->fm_eba_sem);
1864 goto retry;
1865
1866 }
1867 e = wl_get_wle(ubi);
1868 prot_queue_add(ubi, e);
1869 spin_unlock(&ubi->wl_lock);
1870
1871 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1872 ubi->peb_size - ubi->vid_hdr_aloffset);
1873 if (err) {
1874 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
1875 return err;
1876 }
1877
1878 return e->pnum;
1879}
1880#else
1881#include "fastmap-wl.c"
1882#endif
1883