1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#ifndef __UBOOT__
45#include <linux/slab.h>
46#include <linux/pagemap.h>
47#include <linux/list_sort.h>
48#endif
49#include "ubifs.h"
50
51#ifndef __UBOOT__
52
53
54
55
56
57#define SOFT_LEBS_LIMIT 4
58#define HARD_LEBS_LIMIT 32
59
60
61
62
63
64
65
66
67
68
69
70
71
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the LEB reserved in 'c->gc_lnum',
 * adds that LEB as a bud to the journal, and re-seeks the GC write-buffer to
 * it. On success 'c->gc_lnum' is consumed (set to %-1). Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	/* Flush anything still buffered for the current GC head LEB */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/* Make the reserved LEB a bud of the GC journal head */
	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	/* Point the GC write-buffer at the start of the new head LEB */
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
106
107
108
109
110
111
112
113
114
115
116static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
117{
118 ino_t inuma, inumb;
119 struct ubifs_info *c = priv;
120 struct ubifs_scan_node *sa, *sb;
121
122 cond_resched();
123 if (a == b)
124 return 0;
125
126 sa = list_entry(a, struct ubifs_scan_node, list);
127 sb = list_entry(b, struct ubifs_scan_node, list);
128
129 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
130 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
131 ubifs_assert(sa->type == UBIFS_DATA_NODE);
132 ubifs_assert(sb->type == UBIFS_DATA_NODE);
133
134 inuma = key_inum(c, &sa->key);
135 inumb = key_inum(c, &sb->key);
136
137 if (inuma == inumb) {
138 unsigned int blka = key_block(c, &sa->key);
139 unsigned int blkb = key_block(c, &sb->key);
140
141 if (blka <= blkb)
142 return -1;
143 } else if (inuma <= inumb)
144 return -1;
145
146 return 1;
147}
148
149
150
151
152
153
154
155
156
157
158
159static int nondata_nodes_cmp(void *priv, struct list_head *a,
160 struct list_head *b)
161{
162 ino_t inuma, inumb;
163 struct ubifs_info *c = priv;
164 struct ubifs_scan_node *sa, *sb;
165
166 cond_resched();
167 if (a == b)
168 return 0;
169
170 sa = list_entry(a, struct ubifs_scan_node, list);
171 sb = list_entry(b, struct ubifs_scan_node, list);
172
173 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
174 key_type(c, &sb->key) != UBIFS_DATA_KEY);
175 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
176 sb->type != UBIFS_DATA_NODE);
177
178
179 if (sa->type == UBIFS_INO_NODE) {
180 if (sb->type == UBIFS_INO_NODE)
181 return sb->len - sa->len;
182 return -1;
183 }
184 if (sb->type == UBIFS_INO_NODE)
185 return 1;
186
187 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
188 key_type(c, &sa->key) == UBIFS_XENT_KEY);
189 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
190 key_type(c, &sb->key) == UBIFS_XENT_KEY);
191 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
192 sa->type == UBIFS_XENT_NODE);
193 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
194 sb->type == UBIFS_XENT_NODE);
195
196 inuma = key_inum(c, &sa->key);
197 inumb = key_inum(c, &sb->key);
198
199 if (inuma == inumb) {
200 uint32_t hasha = key_hash(c, &sa->key);
201 uint32_t hashb = key_hash(c, &sb->key);
202
203 if (hasha <= hashb)
204 return -1;
205 } else if (inuma <= inumb)
206 return -1;
207
208 return 1;
209}
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes the scanned LEB; contains the sorted data nodes on exit
 * @nondata: receives the sorted non-data nodes on exit
 * @min: minimum surviving node size is returned here
 *
 * This function first zaps unexpected node types (e.g. truncation nodes) and
 * nodes the TNC says are obsolete, then separates the survivors into data
 * nodes (kept on @sleb->nodes) and non-data nodes (moved to @nondata), and
 * sorts both lists - data nodes with 'data_nodes_cmp()' (by inode, then
 * block number) and non-data nodes with 'nondata_nodes_cmp()' (inode nodes
 * first, then (x)dent nodes by parent inode and name hash).
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Ask the TNC whether this node is still the live copy */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
300
301
302
303
304
305
306
307
308
309
310
311
312static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
313 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
314{
315 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
316
317 cond_resched();
318 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
319 if (err)
320 return err;
321
322 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
323 snod->offs, new_lnum, new_offs,
324 snod->len);
325 list_del(&snod->list);
326 kfree(snod);
327 return err;
328}
329
330
331
332
333
334
335
336
337
338
339
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from the data LEB described by @sleb to
 * the GC journal head, switching to a fresh GC head LEB whenever the current
 * one cannot take the next node. Returns zero in case of success and a
 * negative error code in case of failure (propagated from 'sort_nodes()',
 * 'move_node()' or 'switch_gc_head()'); on failure the un-moved non-data
 * nodes are spliced back onto @sleb->nodes so the caller can clean them up.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only for nodes of smaller size.
				 * Stop for direntry nodes and minimum-size
				 * inode nodes - nothing later on the sorted
				 * list can be smaller, so switch the GC head
				 * instead.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in this LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437static int gc_sync_wbufs(struct ubifs_info *c)
438{
439 int err, i;
440
441 for (i = 0; i < c->jhead_cnt; i++) {
442 if (i == GCHD)
443 continue;
444 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
445 if (err)
446 return err;
447 }
448 return 0;
449}
450
451
452
453
454
455
456
457
458
459
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the %LEB_FREED,
 * %LEB_FREED_IDX or %LEB_RETAINED positive codes in case of success, and a
 * negative error code in case of failure. Must be called with the GC head
 * write-buffer lock held (see 'ubifs_garbage_collect()').
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB  */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - keep this one for the GC head */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * We scan the entire LEB even though we only really need to scan up
	 * to (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB - GC it out-of-place via the commit */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		/* Dirty every index node so the next commit re-writes it */
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only
		 * after the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB - move all its live nodes to the GC head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* Retain this LEB as the new reserved GC LEB */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection. The return codes are:
 *   o positive LEB number in case an LEB was freed and may be used;
 *   o %-EAGAIN if the caller has to run a commit first;
 *   o %-ENOSPC if GC failed to make any progress;
 *   o other negative error codes in case of other errors.
 *
 * GC picks dirty LEBs via 'ubifs_find_dirty_leb()' and moves their live
 * nodes to the GC journal head. The whole run is serialized on the GC head
 * write-buffer mutex. On fatal errors the file-system is switched to
 * read-only mode via 'ubifs_ro_mode()'.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before, space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Pick a dirty LEB with at least 'min_space' of reclaimable
		 * space. Unless @anyway is set, empty/freeable LEBs which may
		 * have turned up meanwhile are acceptable answers too (the
		 * last argument of 'ubifs_find_dirty_leb()').
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
		       lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
		       min_space);

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not error, so we have to return the
				 * LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect
			 * some more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * GC moved an LEB but did not make progress. This means that
		 * the previous GC head LEB contained too little free space
		 * and the LEB which was GC'ed contained only large nodes
		 * which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in a hope it'll contain a small node
		 *    which will fit the space we have at the end of current
		 *    GC head LEB, but there is no guarantee, so we try this
		 *    out unless we have already been working for too long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    table, instead of just picking one from the heap
		 *    (previously it already picked the dirtiest LEB).
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		/* Index LEBs were GC'ed - a commit will make them usable */
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	/* Leave the GC head synced and the reserved GC LEB unmapped */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	/* Fatal error - switch to read-only mode */
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}
818
819
820
821
822
823
824
825
826
827
828
829
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and
 * make it free. Note, we cannot do this with indexing LEBs because dirty
 * space may correspond to index nodes which are still part of the TNC - for
 * those, unmapping is deferred until after the commit via the @c->idx_gc
 * list. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/* Unmap (non-index) freeable LEBs as long as there are any */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		/* Mark the LEB entirely free in the lprops */
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'ed index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Take the LEB and drop its index flag */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
902
903
904
905
906
907
908
/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs:
 * every entry on @c->idx_gc marked 'unmap' is unmapped, its %LPROPS_TAKEN
 * flag is cleared, and the entry is freed. Returns zero in case of success
 * and a negative error code in case of failure.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	/* Serialize against GC via the GC head write-buffer mutex */
	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			/* Clear the "taken" flag; leave space counts as-is */
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
					  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
934#endif
935
936
937
938
939
940
941
942
943void ubifs_destroy_idx_gc(struct ubifs_info *c)
944{
945 while (!list_empty(&c->idx_gc)) {
946 struct ubifs_gced_idx_leb *idx_gc;
947
948 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
949 list);
950 c->idx_gc_cnt -= 1;
951 list_del(&idx_gc->list);
952 kfree(idx_gc);
953 }
954}
955#ifndef __UBOOT__
956
957
958
959
960
961
962int ubifs_get_idx_gc_leb(struct ubifs_info *c)
963{
964 struct ubifs_gced_idx_leb *idx_gc;
965 int lnum;
966
967 if (list_empty(&c->idx_gc))
968 return -ENOSPC;
969 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
970 lnum = idx_gc->lnum;
971
972 list_del(&idx_gc->list);
973 kfree(idx_gc);
974 return lnum;
975}
976#endif
977