1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#ifndef __UBOOT__
44#include <linux/slab.h>
45#include <linux/pagemap.h>
46#include <linux/list_sort.h>
47#endif
48#include "ubifs.h"
49
50#ifndef __UBOOT__
51
52
53
54
55
56#define SOFT_LEBS_LIMIT 4
57#define HARD_LEBS_LIMIT 32
58
59
60
61
62
63
64
65
66
67
68
69
70
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the next LEB which is reserved in
 * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required,
 * and other negative error code in case of failures.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	/* Sync again after the unmap before adding the bud to the log */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* The reserved GC LEB is consumed; seek the GC head wbuf to it */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
105
106
107
108
109
110
111
112
113
114
115static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
116{
117 ino_t inuma, inumb;
118 struct ubifs_info *c = priv;
119 struct ubifs_scan_node *sa, *sb;
120
121 cond_resched();
122 if (a == b)
123 return 0;
124
125 sa = list_entry(a, struct ubifs_scan_node, list);
126 sb = list_entry(b, struct ubifs_scan_node, list);
127
128 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
129 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
130 ubifs_assert(sa->type == UBIFS_DATA_NODE);
131 ubifs_assert(sb->type == UBIFS_DATA_NODE);
132
133 inuma = key_inum(c, &sa->key);
134 inumb = key_inum(c, &sb->key);
135
136 if (inuma == inumb) {
137 unsigned int blka = key_block(c, &sa->key);
138 unsigned int blkb = key_block(c, &sb->key);
139
140 if (blka <= blkb)
141 return -1;
142 } else if (inuma <= inumb)
143 return -1;
144
145 return 1;
146}
147
148
149
150
151
152
153
154
155
156
157
158static int nondata_nodes_cmp(void *priv, struct list_head *a,
159 struct list_head *b)
160{
161 ino_t inuma, inumb;
162 struct ubifs_info *c = priv;
163 struct ubifs_scan_node *sa, *sb;
164
165 cond_resched();
166 if (a == b)
167 return 0;
168
169 sa = list_entry(a, struct ubifs_scan_node, list);
170 sb = list_entry(b, struct ubifs_scan_node, list);
171
172 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
173 key_type(c, &sb->key) != UBIFS_DATA_KEY);
174 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
175 sb->type != UBIFS_DATA_NODE);
176
177
178 if (sa->type == UBIFS_INO_NODE) {
179 if (sb->type == UBIFS_INO_NODE)
180 return sb->len - sa->len;
181 return -1;
182 }
183 if (sb->type == UBIFS_INO_NODE)
184 return 1;
185
186 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
187 key_type(c, &sa->key) == UBIFS_XENT_KEY);
188 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
189 key_type(c, &sb->key) == UBIFS_XENT_KEY);
190 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
191 sa->type == UBIFS_XENT_NODE);
192 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
193 sb->type == UBIFS_XENT_NODE);
194
195 inuma = key_inum(c, &sa->key);
196 inumb = key_inum(c, &sb->key);
197
198 if (inuma == inumb) {
199 uint32_t hasha = key_hash(c, &sa->key);
200 uint32_t hashb = key_hash(c, &sb->key);
201
202 if (hasha <= hashb)
203 return -1;
204 } else if (inuma <= inumb)
205 return -1;
206
207 return 1;
208}
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to GC and contains the scanned nodes
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the nodes to garbage collect. First it drops obsolete
 * nodes and separates data and non-data nodes into the @sleb->nodes and
 * @nondata lists correspondingly. Then data nodes are sorted by inode and
 * block number (good for bulk-read), and non-data nodes are sorted with inode
 * nodes first (descending length), followed by direntry/xentry nodes in
 * inode-number and hash order.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Ask the TNC whether this node is still referenced */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		/* Track the smallest valid node - used as a fill threshold */
		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
299
300
301
302
303
304
305
306
307
308
309
310
311static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
312 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
313{
314 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
315
316 cond_resched();
317 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
318 if (err)
319 return err;
320
321 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
322 snod->offs, new_lnum, new_offs,
323 snod->len);
324 list_del(&snod->list);
325 kfree(snod);
326 return err;
327}
328
329
330
331
332
333
334
335
336
337
338
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from data LEB described by @sleb to the GC
 * journal head. Returns zero in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of other failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in this LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	/* Put everything back on one list so the caller can clean it up */
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436static int gc_sync_wbufs(struct ubifs_info *c)
437{
438 int err, i;
439
440 for (i = 0; i < c->jhead_cnt; i++) {
441 if (i == GCHD)
442 continue;
443 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
444 if (err)
445 return err;
446 }
447 return 0;
448}
449
450
451
452
453
454
455
456
457
458
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the %LEB_FREED,
 * %LEB_RETAINED, %LEB_FREED_IDX positive codes in case of success, %-EAGAIN
 * if commit is required, and other negative error codes in case of failures.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No GC head yet - keep this LEB as the GC head */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * We scan the entire LEB even though we only really need to scan up
	 * to (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB - mark its index nodes dirty in the TNC */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only
		 * after the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB - move its valid nodes to the GC head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
639{
640 int i, err, ret, min_space = c->dead_wm;
641 struct ubifs_lprops lp;
642 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
643
644 ubifs_assert_cmt_locked(c);
645 ubifs_assert(!c->ro_media && !c->ro_mount);
646
647 if (ubifs_gc_should_commit(c))
648 return -EAGAIN;
649
650 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
651
652 if (c->ro_error) {
653 ret = -EROFS;
654 goto out_unlock;
655 }
656
657
658 ubifs_assert(!wbuf->used);
659
660 for (i = 0; ; i++) {
661 int space_before, space_after;
662
663 cond_resched();
664
665
666 if (ubifs_gc_should_commit(c)) {
667 ret = -EAGAIN;
668 break;
669 }
670
671 if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
672
673
674
675
676 dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
677 ubifs_commit_required(c);
678 ret = -EAGAIN;
679 break;
680 }
681
682 if (i > HARD_LEBS_LIMIT) {
683
684
685
686
687 dbg_gc("hard limit, -ENOSPC");
688 ret = -ENOSPC;
689 break;
690 }
691
692
693
694
695
696
697
698
699 ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
700 if (ret) {
701 if (ret == -ENOSPC)
702 dbg_gc("no more dirty LEBs");
703 break;
704 }
705
706 dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
707 lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
708 min_space);
709
710 space_before = c->leb_size - wbuf->offs - wbuf->used;
711 if (wbuf->lnum == -1)
712 space_before = 0;
713
714 ret = ubifs_garbage_collect_leb(c, &lp);
715 if (ret < 0) {
716 if (ret == -EAGAIN) {
717
718
719
720
721
722
723 err = ubifs_return_leb(c, lp.lnum);
724 if (err)
725 ret = err;
726 break;
727 }
728 goto out;
729 }
730
731 if (ret == LEB_FREED) {
732
733 dbg_gc("LEB %d freed, return", lp.lnum);
734 ret = lp.lnum;
735 break;
736 }
737
738 if (ret == LEB_FREED_IDX) {
739
740
741
742
743
744
745 dbg_gc("indexing LEB %d freed, continue", lp.lnum);
746 continue;
747 }
748
749 ubifs_assert(ret == LEB_RETAINED);
750 space_after = c->leb_size - wbuf->offs - wbuf->used;
751 dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
752 space_after - space_before);
753
754 if (space_after > space_before) {
755
756 min_space >>= 1;
757 if (min_space < c->dead_wm)
758 min_space = c->dead_wm;
759 continue;
760 }
761
762 dbg_gc("did not make progress");
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780 if (i < SOFT_LEBS_LIMIT) {
781 dbg_gc("try again");
782 continue;
783 }
784
785 min_space <<= 1;
786 if (min_space > c->dark_wm)
787 min_space = c->dark_wm;
788 dbg_gc("set min. space to %d", min_space);
789 }
790
791 if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
792 dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
793 ubifs_commit_required(c);
794 ret = -EAGAIN;
795 }
796
797 err = ubifs_wbuf_sync_nolock(wbuf);
798 if (!err)
799 err = ubifs_leb_unmap(c, c->gc_lnum);
800 if (err) {
801 ret = err;
802 goto out;
803 }
804out_unlock:
805 mutex_unlock(&wbuf->io_mutex);
806 return ret;
807
808out:
809 ubifs_assert(ret < 0);
810 ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
811 ubifs_wbuf_sync_nolock(wbuf);
812 ubifs_ro_mode(c, ret);
813 mutex_unlock(&wbuf->io_mutex);
814 ubifs_return_leb(c, lp.lnum);
815 return ret;
816}
817
818
819
820
821
822
823
824
825
826
827
828
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If an LEB has only dirty and free space, then we may safely unmap it and
 * make it free. Note, we cannot do this with indexing LEBs because dirty
 * space may correspond to index nodes which are required for recovery. In
 * that case, the LEB cannot be unmapped until after the next commit.
 *
 * This function returns %0 upon success and a negative error code upon
 * failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. NOTE(review): this presumably
	 * relies on all wbufs having been sync'd by the commit path before
	 * this is called - confirm against the caller.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
901
902
903
904
905
906
907
908int ubifs_gc_end_commit(struct ubifs_info *c)
909{
910 struct ubifs_gced_idx_leb *idx_gc, *tmp;
911 struct ubifs_wbuf *wbuf;
912 int err = 0;
913
914 wbuf = &c->jheads[GCHD].wbuf;
915 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
916 list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
917 if (idx_gc->unmap) {
918 dbg_gc("LEB %d", idx_gc->lnum);
919 err = ubifs_leb_unmap(c, idx_gc->lnum);
920 if (err)
921 goto out;
922 err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
923 LPROPS_NC, 0, LPROPS_TAKEN, -1);
924 if (err)
925 goto out;
926 list_del(&idx_gc->list);
927 kfree(idx_gc);
928 }
929out:
930 mutex_unlock(&wbuf->io_mutex);
931 return err;
932}
933#endif
934
935
936
937
938
939
940
941
942void ubifs_destroy_idx_gc(struct ubifs_info *c)
943{
944 while (!list_empty(&c->idx_gc)) {
945 struct ubifs_gced_idx_leb *idx_gc;
946
947 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
948 list);
949 c->idx_gc_cnt -= 1;
950 list_del(&idx_gc->list);
951 kfree(idx_gc);
952 }
953}
954#ifndef __UBOOT__
955
956
957
958
959
960
961int ubifs_get_idx_gc_leb(struct ubifs_info *c)
962{
963 struct ubifs_gced_idx_leb *idx_gc;
964 int lnum;
965
966 if (list_empty(&c->idx_gc))
967 return -ENOSPC;
968 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
969 lnum = idx_gc->lnum;
970
971 list_del(&idx_gc->list);
972 kfree(idx_gc);
973 return lnum;
974}
975#endif
976