1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56#include <linux/pagemap.h>
57#include "ubifs.h"
58
59
60
61
62
63
64#define SOFT_LEBS_LIMIT 4
65#define HARD_LEBS_LIMIT 32
66
67
68
69
70
71
72
73
74
75
76
77
78
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the LEB reserved in @c->gc_lnum:
 * it syncs the GC write-buffer, unmaps the reserved LEB, adds it to the
 * journal log as the new GC-head bud, and seeks the write-buffer to it.
 * @c->gc_lnum is set to %-1 so the caller must reserve a new GC LEB later.
 * Returns %0 in case of success and a negative error code in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'gc_lnum' before making it the new GC head.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* Cleared before seeking; the caller must reserve a new GC LEB */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM);
	return err;
}
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
/**
 * list_sort - sort a doubly-linked circular list (merge sort).
 * @priv: private data, opaquely passed through to @cmp
 * @head: the list head of the list to sort
 * @cmp: comparison function; must return <= 0 if @a should sort before @b,
 *       and > 0 otherwise
 *
 * Bottom-up iterative merge sort: the list is temporarily detached from
 * @head and treated as a circular singly-threaded chain via ->next; runs of
 * size @insize are merged pairwise, doubling @insize each pass until a pass
 * performs at most one merge. O(n*log(n)) comparisons, stable for a @cmp
 * that returns <= 0 on ties.
 */
static void list_sort(void *priv, struct list_head *head,
		      int (*cmp)(void *priv, struct list_head *a,
				 struct list_head *b))
{
	struct list_head *p, *q, *e, *list, *tail, *oldhead;
	int insize, nmerges, psize, qsize, i;

	if (list_empty(head))
		return;

	/* Detach the elements; 'list' is the first element of the chain */
	list = head->next;
	list_del(head);
	insize = 1;
	for (;;) {
		p = oldhead = list;
		list = tail = NULL;
		nmerges = 0;

		while (p) {
			nmerges++;
			/* Advance 'q' by up to 'insize' elements past 'p' */
			q = p;
			psize = 0;
			for (i = 0; i < insize; i++) {
				psize++;
				q = q->next == oldhead ? NULL : q->next;
				if (!q)
					break;
			}

			/* Merge the run at 'p' with the run at 'q' */
			qsize = insize;
			while (psize > 0 || (qsize > 0 && q)) {
				if (!psize) {
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				} else if (!qsize || !q) {
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else if (cmp(priv, p, q) <= 0) {
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else {
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				}
				/* Append 'e' to the merged output */
				if (tail)
					tail->next = e;
				else
					list = e;
				e->prev = tail;
				tail = e;
			}
			p = q;
		}

		/* Close the circle for the next pass */
		tail->next = list;
		list->prev = tail;

		/* A single merge means the list is fully sorted */
		if (nmerges <= 1)
			break;

		insize *= 2;
	}

	/* Re-attach the sorted chain to 'head' */
	head->next = list;
	head->prev = list->prev;
	list->prev->next = head;
	list->prev = head;
}
204
205
206
207
208
209
210
211
212
213
214int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
215{
216 ino_t inuma, inumb;
217 struct ubifs_info *c = priv;
218 struct ubifs_scan_node *sa, *sb;
219
220 cond_resched();
221 sa = list_entry(a, struct ubifs_scan_node, list);
222 sb = list_entry(b, struct ubifs_scan_node, list);
223 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
224 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
225
226 inuma = key_inum(c, &sa->key);
227 inumb = key_inum(c, &sb->key);
228
229 if (inuma == inumb) {
230 unsigned int blka = key_block(c, &sa->key);
231 unsigned int blkb = key_block(c, &sb->key);
232
233 if (blka <= blkb)
234 return -1;
235 } else if (inuma <= inumb)
236 return -1;
237
238 return 1;
239}
240
241
242
243
244
245
246
247
248
249
250
251int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
252{
253 int typea, typeb;
254 ino_t inuma, inumb;
255 struct ubifs_info *c = priv;
256 struct ubifs_scan_node *sa, *sb;
257
258 cond_resched();
259 sa = list_entry(a, struct ubifs_scan_node, list);
260 sb = list_entry(b, struct ubifs_scan_node, list);
261 typea = key_type(c, &sa->key);
262 typeb = key_type(c, &sb->key);
263 ubifs_assert(typea != UBIFS_DATA_KEY && typeb != UBIFS_DATA_KEY);
264
265
266 if (typea == UBIFS_INO_KEY) {
267 if (typeb == UBIFS_INO_KEY)
268 return sb->len - sa->len;
269 return -1;
270 }
271 if (typeb == UBIFS_INO_KEY)
272 return 1;
273
274 ubifs_assert(typea == UBIFS_DENT_KEY && typeb == UBIFS_DENT_KEY);
275 inuma = key_inum(c, &sa->key);
276 inumb = key_inum(c, &sb->key);
277
278 if (inuma == inumb) {
279 uint32_t hasha = key_hash(c, &sa->key);
280 uint32_t hashb = key_hash(c, &sb->key);
281
282 if (hasha <= hashb)
283 return -1;
284 } else if (inuma <= inumb)
285 return -1;
286
287 return 1;
288}
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
/**
 * sort_nodes - prepare and sort the nodes of a GC'ed LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB whose nodes are to be sorted
 * @nondata: list to which non-data nodes are moved
 * @min: smallest remaining node length is returned here
 *
 * This function drops nodes which the TNC reports as obsolete, moves
 * non-data nodes from @sleb->nodes to @nondata, records the minimum length
 * of the surviving nodes in @min, and finally sorts both lists with
 * 'data_nodes_cmp()' and 'nondata_nodes_cmp()'. Returns %0 in case of
 * success and a negative error code in case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		int err;

		/* Only data, inode and dentry-type nodes are expected here */
		ubifs_assert(snod->type != UBIFS_IDX_NODE);
		ubifs_assert(snod->type != UBIFS_REF_NODE);
		ubifs_assert(snod->type != UBIFS_CS_NODE);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);
	return 0;
}
356
357
358
359
360
361
362
363
364
365
366
367
368static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
369 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
370{
371 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
372
373 cond_resched();
374 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
375 if (err)
376 return err;
377
378 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
379 snod->offs, new_lnum, new_offs,
380 snod->len);
381 list_del(&snod->list);
382 kfree(snod);
383 return err;
384}
385
386
387
388
389
390
391
392
393
394
395
/**
 * move_nodes - move all valid nodes of a LEB to the GC journal head.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * The nodes are first sorted by 'sort_nodes()'. Data nodes are written in
 * sorted order without gaps; non-data nodes are packed first-fit. When no
 * more nodes fit, the GC head is switched to a fresh LEB and packing
 * continues. Returns %0 in case of success and a negative error code in
 * case of failure; on failure the unmoved non-data nodes are spliced back
 * onto @sleb->nodes so the caller can clean them up.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC write-buffer is not seeked to any LEB yet - switch
		 * to the reserved GC LEB before moving anything.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read - we want them to go in a row.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in this LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	/* Return the unmoved non-data nodes to the caller's list */
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493static int gc_sync_wbufs(struct ubifs_info *c)
494{
495 int err, i;
496
497 for (i = 0; i < c->jhead_cnt; i++) {
498 if (i == GCHD)
499 continue;
500 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
501 if (err)
502 return err;
503 }
504 return 0;
505}
506
507
508
509
510
511
512
513
514
515
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects the LEB described by @lp and returns:
 *   o %LEB_FREED if a data LEB was freed and is ready for re-use;
 *   o %LEB_FREED_IDX if an indexing LEB was freed (it is kept on
 *     @c->idx_gc and becomes usable only after commit);
 *   o %LEB_RETAINED if the LEB was kept as the new reserved GC LEB;
 *   o a negative error code in case of failure.
 *
 * Must be called with the GC head write-buffer locked (see the callers).
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	/*
	 * Scan the whole LEB to build the list of nodes it contains; the
	 * type of the first node tells whether it is an indexing LEB.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB was scanned */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			/* Dirty the index node so the commit rewrites it */
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * The LEB is not unmapped here - it stays on @c->idx_gc and
		 * is unmapped after the next commit (see
		 * 'ubifs_gc_end_commit()'). Its space is accounted as free
		 * and the idx_gc count is bumped by the final argument.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB was scanned */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Publish the GC'ed LEB and bump the GC sequence counter */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* Keep this LEB as the new reserved GC LEB */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes, so bump the GC sequence */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if it looks unfruitful
 *
 * This function does out-of-place garbage collection. The return codes are:
 *   o a positive LEB number if the LEB was freed and may be re-used;
 *   o %-EAGAIN if a commit is required before GC can make progress;
 *   o %-ENOSPC if GC failed to free anything;
 *   o other negative error codes in case of failure (the file-system is
 *     switched to read-only mode in that case).
 *
 * The GC head write-buffer lock is held for the whole run.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_media) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before = c->leb_size - wbuf->offs - wbuf->used;
		int space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations and some indexing
			 * LEBs were GC'ed - they become usable after the
			 * commit, so ask for one.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs without making progress -
			 * give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Pick a dirty LEB with at least @min_space of reclaimable
		 * space. NOTE(review): the last argument presumably relaxes
		 * the finder's criteria when @anyway is set - confirm
		 * against 'ubifs_find_dirty_leb()'.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
		       "(min. space %d)", lp.lnum, lp.free, lp.dirty,
		       lp.free + lp.dirty, min_space);

		if (lp.free + lp.dirty == c->leb_size) {
			/* The LEB contains only free and dirty space */
			dbg_gc("LEB %d is free, return it", lp.lnum);
			/*
			 * Freeable index LEBs are not expected here - they
			 * cannot simply be unmapped.
			 */
			ubifs_assert(!(lp.flags & LPROPS_INDEX));
			if (lp.free != c->leb_size) {
				/*
				 * The LEB has dirt: sync the write-buffers
				 * first (one of them may hold data obsoleting
				 * nodes in this LEB), then account all of its
				 * space as free.
				 */
				ret = gc_sync_wbufs(c);
				if (ret)
					goto out;
				ret = ubifs_change_one_lp(c, lp.lnum,
							  c->leb_size, 0, 0, 0,
							  0);
				if (ret)
					goto out;
			}
			ret = ubifs_leb_unmap(c, lp.lnum);
			if (ret)
				goto out;
			ret = lp.lnum;
			break;
		}

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN || ret == -ENOSPC) {
				/*
				 * These codes are not errors, so we have to
				 * return the LEB to lprops. But if
				 * 'ubifs_return_leb()' fails, its failure
				 * code is propagated to the caller instead of
				 * the original '-EAGAIN' or '-ENOSPC'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB - it is on @c->idx_gc and
			 * becomes usable only after the next commit, so it
			 * cannot be returned to the caller; try another one.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, be more aggressive next time */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * No space was freed this iteration. Within the soft limit
		 * just try again; beyond it, double @min_space (capped at
		 * @c->dark_wm) so that only LEBs with more reclaimable space
		 * are picked next time.
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	/*
	 * Sync the GC head and unmap the reserved GC LEB before returning.
	 * NOTE(review): this assumes 'c->gc_lnum' is valid (not %-1) on
	 * every path that breaks out of the loop - confirm.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	/* Fatal error - switch to read-only mode */
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_ro_mode(c, ret);
	ubifs_wbuf_sync_nolock(wbuf);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}
876
877
878
879
880
881
882
883
884
885
886
887
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * At the start of commit this function unmaps freeable non-index LEBs and
 * arranges for GC'ed and freeable index LEBs to be unmapped when the commit
 * ends (see 'ubifs_gc_end_commit()'). Runs with the lprops lock held.
 * Returns %0 in case of success and a negative error code in case of
 * failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs - LEBs which contain only free
	 * and dirty space - and account them as fully free.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'ed index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record freeable index LEBs for unmapping after the commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/*
		 * Take the LEB and clear its index flag; it is released
		 * (unmapped) only in 'ubifs_gc_end_commit()'.
		 */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
960
961
962
963
964
965
966
967int ubifs_gc_end_commit(struct ubifs_info *c)
968{
969 struct ubifs_gced_idx_leb *idx_gc, *tmp;
970 struct ubifs_wbuf *wbuf;
971 int err = 0;
972
973 wbuf = &c->jheads[GCHD].wbuf;
974 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
975 list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
976 if (idx_gc->unmap) {
977 dbg_gc("LEB %d", idx_gc->lnum);
978 err = ubifs_leb_unmap(c, idx_gc->lnum);
979 if (err)
980 goto out;
981 err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
982 LPROPS_NC, 0, LPROPS_TAKEN, -1);
983 if (err)
984 goto out;
985 list_del(&idx_gc->list);
986 kfree(idx_gc);
987 }
988out:
989 mutex_unlock(&wbuf->io_mutex);
990 return err;
991}
992
993
994
995
996
997
998
999
1000
1001void ubifs_destroy_idx_gc(struct ubifs_info *c)
1002{
1003 while (!list_empty(&c->idx_gc)) {
1004 struct ubifs_gced_idx_leb *idx_gc;
1005
1006 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
1007 list);
1008 c->idx_gc_cnt -= 1;
1009 list_del(&idx_gc->list);
1010 kfree(idx_gc);
1011 }
1012}
1013
1014
1015
1016
1017
1018
1019
1020int ubifs_get_idx_gc_leb(struct ubifs_info *c)
1021{
1022 struct ubifs_gced_idx_leb *idx_gc;
1023 int lnum;
1024
1025 if (list_empty(&c->idx_gc))
1026 return -ENOSPC;
1027 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
1028 lnum = idx_gc->lnum;
1029
1030 list_del(&idx_gc->list);
1031 kfree(idx_gc);
1032 return lnum;
1033}
1034