/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only a valid erase
 * counter header; the rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of
 * 'ubi_wl_put_peb()', they are scheduled for erasure. The erasure is done
 * asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is
 * detected in a physical eraseblock, it has to be moved and erased. This
 * sub-system performs the scrubbing.
 */
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"
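
/* Number of physical eraseblocks reserved for wear-leveling purposes */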
#define WL_RESERVED_PEBS 1
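
/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 */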
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
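
/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. Always picking the one with the
 * highest erase counter could, in certain workloads, lead to unlimited wear
 * of one or a few physical eraseblocks. So we pick a free physical eraseblock
 * with erase counter not higher than the lowest erase counter plus
 * %WL_FREE_MAX_DIFF.
 */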
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
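
/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */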
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);
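
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */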
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
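
/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear-leveling entry and removes the reference from
 * the lookup table.
 */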
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}
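
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */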
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure after
	 * this call as it will have been freed or reused by that time by the
	 * worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
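
/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */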
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}
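
/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker.
 */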
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
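
/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter in the tree.
 */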
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}
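
/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear-leveling entry with medium erase counter,
 * but not greater or equivalent than the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */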
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be
		 * used as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}
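
/**
 * wl_get_wle - get a mean wear-leveling entry from the free tree.
 * @ubi: UBI device description object
 *
 * This function removes the entry from the free tree and returns it in case
 * of success, and %NULL in case of failure.
 */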
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove the eraseblock from the free tree - the WL sub-system no
	 * longer considers it free, and the caller now owns it.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}
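
/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */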
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}
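
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */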
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
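
/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the used tree.
 */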
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to move,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
		       e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
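
/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */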
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}
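
/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */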
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);
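
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: denotes whether @ubi->work_sem is already held in read mode
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */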
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
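
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */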
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
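
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit because the
 *            WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */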
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_avalible(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		dst_leb_clean = 1;
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move PEB without a VID header. UBI
			 * always writes VID headers shortly after the PEB was
			 * given, so we have a situation when it has not yet
			 * had a chance to write it, because it was preempted.
			 * So add this PEB to the protection queue so far,
			 * because presumably more data will be written to it
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * While a full scan would detect interrupted erasures
			 * at attach time, we can face them here when attached
			 * from Fastmap.
			 */
			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
			       e1->pnum);
			erase = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			keep = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			dst_leb_clean = 1;
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_buf(vidb);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

	/*
	 * For some reasons the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else if (keep)
		wl_tree_add(e1, &ubi->used);
	if (dst_leb_clean) {
		wl_tree_add(e2, &ubi->free);
		ubi->free_count++;
	}

	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	if (dst_leb_clean) {
		ensure_wear_leveling(ubi, 1);
	} else {
		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
		if (err)
			goto out_ro;
	}

	if (erase) {
		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
		if (err)
			goto out_ro;
	}

	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_free_vid_buf(vidb);
	return 0;
}
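
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */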
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
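
/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture test if
 * needed. It also takes care about marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */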
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
		if (err1) {
			wl_entry_destroy(ubi, e);
			err = err1;
			goto out_ro;
		}
		return err;
	}

	wl_entry_destroy(ubi, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, lets switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err(ubi, "no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg(ubi, "mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The amount of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	int ret;

	if (shutdown) {
		struct ubi_wl_entry *e = wl_wrk->e;

		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	ret = __erase_worker(ubi, wl_wrk);
	kfree(wl_wrk);
	return ret;
}
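
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * has happened to this physical eraseblock and it has to be tested. This
 * function returns zero in case of success, and a negative error code in
 * case of failure.
 */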
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the
		 * EBA sub-system already re-mapped the LEB in
		 * 'ubi_eba_copy_leb()' but the WL sub-system has not put the
		 * PEB to the "used" tree yet, but it is about to do this. So
		 * we just set a flag which will tell the WL worker that the
		 * PEB is not needed anymore and should be scheduled for
		 * erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}
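
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing which is done in background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */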
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
	    in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err(ubi, "PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}
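
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */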
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk, *tmp;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}
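
/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */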
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}
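
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */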
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
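
/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */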
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
	flush_work(&ubi->fm_work);
#endif
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
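
/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code
 * in case of failure.
 */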
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
			wl_entry_destroy(ubi, e);
			goto out_free;
		}

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		cond_resched();

		e = ubi_find_fm_block(ubi, aeb->pnum);

		if (e) {
			ubi_assert(!ubi->lookuptbl[e->pnum]);
			ubi->lookuptbl[e->pnum] = e;
		} else {
			/*
			 * Usually old Fastmap PEBs are scheduled for erasure
			 * and we don't have to care about them, but if we were
			 * interrupted by a power cut while scanning them, they
			 * will be present in the attach lists while still
			 * missing from the lookup table.
			 */
			if (ubi->lookuptbl[aeb->pnum])
				continue;

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi_assert(!ubi->lookuptbl[e->pnum]);
			ubi->lookuptbl[e->pnum] = e;
			if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
				wl_entry_destroy(ubi, e);
				goto out_free;
			}
		}

		found_pebs++;
	}

	dbg_wl("found %i PEBs", found_pebs);

	ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
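
/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */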
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}
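
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */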
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}
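
/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, and a non-zero value if not or if an error
 * occurred.
 */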
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}
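
/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if
 * it is not.
 */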
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}
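
/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in the protection queue and %-EINVAL
 * if it is not.
 */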
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}

#ifndef CONFIG_MTD_UBI_FASTMAP
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}
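
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */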
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}
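
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */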
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}
	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
#else
#include "fastmap-wl.c"
#endif