/*
 * This file implements UBIFS garbage collection: still-referenced nodes are
 * moved out of dirty LEBs to the GC journal head so that those LEBs can be
 * unmapped and reused.
 *
 * NOTE(review): the original license/header comment block was garbled during
 * extraction (only line numbers survived); restore it from the upstream
 * source tree.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list_sort.h>
#include "ubifs.h"

/*
 * GC tries up to SOFT_LEBS_LIMIT LEBs before preferring to let a commit run,
 * and gives up with -ENOSPC after HARD_LEBS_LIMIT LEBs without progress
 * (see 'ubifs_garbage_collect()').
 */
#define SOFT_LEBS_LIMIT 4
#define HARD_LEBS_LIMIT 32

/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function makes the LEB reserved in @c->gc_lnum the new GC journal
 * head: the current head's write-buffer is synchronized, the reserved LEB is
 * unmapped, added to the log as a bud of the GC head, and the write-buffer is
 * positioned at its start. Returns %0 in case of success and a negative error
 * code in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	/* An LEB must have been reserved for GC beforehand */
	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* The reserved LEB is consumed - a new one has to be found later */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
125static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
126{
127 ino_t inuma, inumb;
128 struct ubifs_info *c = priv;
129 struct ubifs_scan_node *sa, *sb;
130
131 cond_resched();
132 if (a == b)
133 return 0;
134
135 sa = list_entry(a, struct ubifs_scan_node, list);
136 sb = list_entry(b, struct ubifs_scan_node, list);
137
138 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
139 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
140 ubifs_assert(sa->type == UBIFS_DATA_NODE);
141 ubifs_assert(sb->type == UBIFS_DATA_NODE);
142
143 inuma = key_inum(c, &sa->key);
144 inumb = key_inum(c, &sb->key);
145
146 if (inuma == inumb) {
147 unsigned int blka = key_block(c, &sa->key);
148 unsigned int blkb = key_block(c, &sb->key);
149
150 if (blka <= blkb)
151 return -1;
152 } else if (inuma <= inumb)
153 return -1;
154
155 return 1;
156}
168static int nondata_nodes_cmp(void *priv, struct list_head *a,
169 struct list_head *b)
170{
171 ino_t inuma, inumb;
172 struct ubifs_info *c = priv;
173 struct ubifs_scan_node *sa, *sb;
174
175 cond_resched();
176 if (a == b)
177 return 0;
178
179 sa = list_entry(a, struct ubifs_scan_node, list);
180 sb = list_entry(b, struct ubifs_scan_node, list);
181
182 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
183 key_type(c, &sb->key) != UBIFS_DATA_KEY);
184 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
185 sb->type != UBIFS_DATA_NODE);
186
187
188 if (sa->type == UBIFS_INO_NODE) {
189 if (sb->type == UBIFS_INO_NODE)
190 return sb->len - sa->len;
191 return -1;
192 }
193 if (sb->type == UBIFS_INO_NODE)
194 return 1;
195
196 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
197 key_type(c, &sa->key) == UBIFS_XENT_KEY);
198 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
199 key_type(c, &sb->key) == UBIFS_XENT_KEY);
200 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
201 sa->type == UBIFS_XENT_NODE);
202 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
203 sb->type == UBIFS_XENT_NODE);
204
205 inuma = key_inum(c, &sa->key);
206 inumb = key_inum(c, &sb->key);
207
208 if (inuma == inumb) {
209 uint32_t hasha = key_hash(c, &sa->key);
210 uint32_t hashb = key_hash(c, &sb->key);
211
212 if (hasha <= hashb)
213 return -1;
214 } else if (inuma <= inumb)
215 return -1;
216
217 return 1;
218}
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: main GC LEB scan data
 * @nondata: list to which non-data nodes are moved
 * @min: minimum node size is returned here
 *
 * This function sorts the nodes of the GC'ed LEB: nodes which are no longer
 * referenced by the index are removed, data nodes are kept on @sleb->nodes
 * and sorted by inode/block number, all other nodes are moved to @nondata
 * and sorted as well. The length of the smallest surviving node is returned
 * via @min. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes from non-data nodes, dropping dead nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Per the assertion above, this is a truncation node
			 * - it is not referenced from the index, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Check whether the index still refers to this location */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		/* Track the smallest node length for the caller */
		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
321static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
322 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
323{
324 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
325
326 cond_resched();
327 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
328 if (err)
329 return err;
330
331 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
332 snod->offs, new_lnum, new_offs,
333 snod->len);
334 list_del(&snod->list);
335 kfree(snod);
336 return err;
337}
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from data LEB described by @sleb to the GC
 * journal head, switching the head to the next reserved LEB whenever the
 * current one runs out of space. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set up yet (e.g. this is the
		 * first GC invocation), so switch it to the reserved LEB
		 * first.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location, a full GC head LEB at a time */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes first, in sorted order */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip over data nodes that do not
				 * fit - keep them in sorted order and switch
				 * the GC head instead.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Then move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * This node does not fit. Direntry nodes and
				 * minimum-sized inode nodes are not skipped
				 * over - stop and switch the GC head; for
				 * other (larger, data-carrying inode) nodes,
				 * try the following smaller ones first.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in this LEB and switch to the
		 * next reserved LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	/* Put unprocessed non-data nodes back so the caller can free them */
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
446static int gc_sync_wbufs(struct ubifs_info *c)
447{
448 int err, i;
449
450 for (i = 0; i < c->jhead_cnt; i++) {
451 if (i == GCHD)
452 continue;
453 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
454 if (err)
455 return err;
456 }
457 return 0;
458}
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects the LEB described by @lp and returns:
 *   o %LEB_FREED if the LEB was freed and may be reused;
 *   o %LEB_FREED_IDX if the LEB held index nodes and can only be reused
 *     after the next commit;
 *   o %LEB_RETAINED if the LEB was kept as the new reserved GC LEB;
 *   o a negative error code in case of failure.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	/* A GC LEB must be reserved unless the wbuf is empty or we recover */
	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	/* Never GC the reserved LEB or the one the GC head writes to */
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - the LEB holds no referenced nodes at all */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * The LEB has some dirty space - sync all other
			 * write-buffers and mark the whole LEB free in the
			 * lprops before unmapping it. NOTE(review): the
			 * sync is presumably needed so nodes counted dirty
			 * here cannot still sit in a write-buffer - confirm
			 * against the upstream comment.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - keep this one */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * Scan the LEB to find out what nodes it contains; quiet mode is not
	 * used because corrupted data here is unexpected.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An index LEB - mark its index nodes dirty instead of
		 * moving them; the commit will take care of them */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		/* Remember this LEB so it can be unmapped after the commit */
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/* Mark the LEB free (keeping LPROPS_INDEX) and bump the
		 * GC'ed index LEB count */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB - move its live nodes to the GC head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/*
		 * Publish the GC'ed LEB and bump the GC sequence number; the
		 * write barriers order these stores for concurrent readers.
		 */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - retain this one */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			/* Sync the GC head before unmapping the old LEB */
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes, so publish them anyway */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
648int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
649{
650 int i, err, ret, min_space = c->dead_wm;
651 struct ubifs_lprops lp;
652 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
653
654 ubifs_assert_cmt_locked(c);
655 ubifs_assert(!c->ro_media && !c->ro_mount);
656
657 if (ubifs_gc_should_commit(c))
658 return -EAGAIN;
659
660 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
661
662 if (c->ro_error) {
663 ret = -EROFS;
664 goto out_unlock;
665 }
666
667
668 ubifs_assert(!wbuf->used);
669
670 for (i = 0; ; i++) {
671 int space_before, space_after;
672
673 cond_resched();
674
675
676 if (ubifs_gc_should_commit(c)) {
677 ret = -EAGAIN;
678 break;
679 }
680
681 if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
682
683
684
685
686 dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
687 ubifs_commit_required(c);
688 ret = -EAGAIN;
689 break;
690 }
691
692 if (i > HARD_LEBS_LIMIT) {
693
694
695
696
697 dbg_gc("hard limit, -ENOSPC");
698 ret = -ENOSPC;
699 break;
700 }
701
702
703
704
705
706
707
708
709 ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
710 if (ret) {
711 if (ret == -ENOSPC)
712 dbg_gc("no more dirty LEBs");
713 break;
714 }
715
716 dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
717 lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
718 min_space);
719
720 space_before = c->leb_size - wbuf->offs - wbuf->used;
721 if (wbuf->lnum == -1)
722 space_before = 0;
723
724 ret = ubifs_garbage_collect_leb(c, &lp);
725 if (ret < 0) {
726 if (ret == -EAGAIN) {
727
728
729
730
731
732
733 err = ubifs_return_leb(c, lp.lnum);
734 if (err)
735 ret = err;
736 break;
737 }
738 goto out;
739 }
740
741 if (ret == LEB_FREED) {
742
743 dbg_gc("LEB %d freed, return", lp.lnum);
744 ret = lp.lnum;
745 break;
746 }
747
748 if (ret == LEB_FREED_IDX) {
749
750
751
752
753
754
755 dbg_gc("indexing LEB %d freed, continue", lp.lnum);
756 continue;
757 }
758
759 ubifs_assert(ret == LEB_RETAINED);
760 space_after = c->leb_size - wbuf->offs - wbuf->used;
761 dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
762 space_after - space_before);
763
764 if (space_after > space_before) {
765
766 min_space >>= 1;
767 if (min_space < c->dead_wm)
768 min_space = c->dead_wm;
769 continue;
770 }
771
772 dbg_gc("did not make progress");
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790 if (i < SOFT_LEBS_LIMIT) {
791 dbg_gc("try again");
792 continue;
793 }
794
795 min_space <<= 1;
796 if (min_space > c->dark_wm)
797 min_space = c->dark_wm;
798 dbg_gc("set min. space to %d", min_space);
799 }
800
801 if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
802 dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
803 ubifs_commit_required(c);
804 ret = -EAGAIN;
805 }
806
807 err = ubifs_wbuf_sync_nolock(wbuf);
808 if (!err)
809 err = ubifs_leb_unmap(c, c->gc_lnum);
810 if (err) {
811 ret = err;
812 goto out;
813 }
814out_unlock:
815 mutex_unlock(&wbuf->io_mutex);
816 return ret;
817
818out:
819 ubifs_assert(ret < 0);
820 ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
821 ubifs_wbuf_sync_nolock(wbuf);
822 ubifs_ro_mode(c, ret);
823 mutex_unlock(&wbuf->io_mutex);
824 ubifs_return_leb(c, lp.lnum);
825 return ret;
826}
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * This function unmaps freeable non-index LEBs and makes them free in the
 * lprops, marks already GC'ed index LEBs for unmapping after the commit, and
 * takes freeable index LEBs ("frdi_idx") off the index so that they too can
 * be unmapped after the commit. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs (those with only free and dirty
	 * space) and mark them completely free in the lprops.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'ed index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record freeable index LEBs for unmapping after the commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/*
		 * Take the LEB (so nobody else uses it) and drop its index
		 * flag; the last argument bumps the GC'ed index LEB count.
		 */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
918int ubifs_gc_end_commit(struct ubifs_info *c)
919{
920 struct ubifs_gced_idx_leb *idx_gc, *tmp;
921 struct ubifs_wbuf *wbuf;
922 int err = 0;
923
924 wbuf = &c->jheads[GCHD].wbuf;
925 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
926 list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
927 if (idx_gc->unmap) {
928 dbg_gc("LEB %d", idx_gc->lnum);
929 err = ubifs_leb_unmap(c, idx_gc->lnum);
930 if (err)
931 goto out;
932 err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
933 LPROPS_NC, 0, LPROPS_TAKEN, -1);
934 if (err)
935 goto out;
936 list_del(&idx_gc->list);
937 kfree(idx_gc);
938 }
939out:
940 mutex_unlock(&wbuf->io_mutex);
941 return err;
942}
952void ubifs_destroy_idx_gc(struct ubifs_info *c)
953{
954 while (!list_empty(&c->idx_gc)) {
955 struct ubifs_gced_idx_leb *idx_gc;
956
957 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
958 list);
959 c->idx_gc_cnt -= 1;
960 list_del(&idx_gc->list);
961 kfree(idx_gc);
962 }
963}
971int ubifs_get_idx_gc_leb(struct ubifs_info *c)
972{
973 struct ubifs_gced_idx_leb *idx_gc;
974 int lnum;
975
976 if (list_empty(&c->idx_gc))
977 return -ENOSPC;
978 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
979 lnum = idx_gc->lnum;
980
981 list_del(&idx_gc->list);
982 kfree(idx_gc);
983 return lnum;
984}