1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#ifndef __UBOOT__
44#include <log.h>
45#include <dm/devres.h>
46#include <linux/slab.h>
47#include <linux/pagemap.h>
48#include <linux/list_sort.h>
49#endif
50#include "ubifs.h"
51
52#ifndef __UBOOT__
53
54
55
56
57
58#define SOFT_LEBS_LIMIT 4
59#define HARD_LEBS_LIMIT 32
60
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the LEB reserved in @c->gc_lnum and
 * invalidates the reservation (@c->gc_lnum becomes %-1).  Returns %0 in case
 * of success and a negative error code in case of failure.
 *
 * NOTE(review): the statement order below matters for recovery — the old GC
 * head is fully synchronized before the new LEB is unmapped and added to the
 * log as a bud.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	/* A LEB must have been reserved for the new GC head */
	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	/* Flush the old GC head write-buffer to the media */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The write-buffer is synchronized, so it is now safe to unmap the
	 * LEB that is about to become the new GC head.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	/* Sync again before the new bud is referenced from the log */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* The reservation is consumed; point the write-buffer at it */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
107
108
109
110
111
112
113
114
115
116
117static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
118{
119 ino_t inuma, inumb;
120 struct ubifs_info *c = priv;
121 struct ubifs_scan_node *sa, *sb;
122
123 cond_resched();
124 if (a == b)
125 return 0;
126
127 sa = list_entry(a, struct ubifs_scan_node, list);
128 sb = list_entry(b, struct ubifs_scan_node, list);
129
130 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
131 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
132 ubifs_assert(sa->type == UBIFS_DATA_NODE);
133 ubifs_assert(sb->type == UBIFS_DATA_NODE);
134
135 inuma = key_inum(c, &sa->key);
136 inumb = key_inum(c, &sb->key);
137
138 if (inuma == inumb) {
139 unsigned int blka = key_block(c, &sa->key);
140 unsigned int blkb = key_block(c, &sb->key);
141
142 if (blka <= blkb)
143 return -1;
144 } else if (inuma <= inumb)
145 return -1;
146
147 return 1;
148}
149
150
151
152
153
154
155
156
157
158
159
160static int nondata_nodes_cmp(void *priv, struct list_head *a,
161 struct list_head *b)
162{
163 ino_t inuma, inumb;
164 struct ubifs_info *c = priv;
165 struct ubifs_scan_node *sa, *sb;
166
167 cond_resched();
168 if (a == b)
169 return 0;
170
171 sa = list_entry(a, struct ubifs_scan_node, list);
172 sb = list_entry(b, struct ubifs_scan_node, list);
173
174 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
175 key_type(c, &sb->key) != UBIFS_DATA_KEY);
176 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
177 sb->type != UBIFS_DATA_NODE);
178
179
180 if (sa->type == UBIFS_INO_NODE) {
181 if (sb->type == UBIFS_INO_NODE)
182 return sb->len - sa->len;
183 return -1;
184 }
185 if (sb->type == UBIFS_INO_NODE)
186 return 1;
187
188 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
189 key_type(c, &sa->key) == UBIFS_XENT_KEY);
190 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
191 key_type(c, &sb->key) == UBIFS_XENT_KEY);
192 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
193 sa->type == UBIFS_XENT_NODE);
194 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
195 sb->type == UBIFS_XENT_NODE);
196
197 inuma = key_inum(c, &sa->key);
198 inumb = key_inum(c, &sb->key);
199
200 if (inuma == inumb) {
201 uint32_t hasha = key_hash(c, &sa->key);
202 uint32_t hashb = key_hash(c, &sb->key);
203
204 if (hasha <= hashb)
205 return -1;
206 } else if (inuma <= inumb)
207 return -1;
208
209 return 1;
210}
211
212
/**
 * sort_nodes - sort the nodes of a GC'ed LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB description; on return its node list holds only the
 *        live data nodes, sorted with 'data_nodes_cmp()'
 * @nondata: list head which receives the live non-data nodes, sorted with
 *           'nondata_nodes_cmp()'
 * @min: is set to the length of the shortest live node found (or %INT_MAX
 *       if there were none)
 *
 * Walks the scanned nodes, drops everything that is not an inode, data,
 * directory entry, or extended attribute entry node, drops obsolete nodes
 * (those no longer referenced by the TNC), and splits the survivors into the
 * data list (left on @sleb->nodes) and @nondata.  Both lists are then sorted.
 * Returns %0 in case of success and a negative error code in case of
 * failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes from non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Truncation node (per the assert above) - zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Is this node still referenced by the index? */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete - remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		/* Track the smallest live node length for the caller */
		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
301
302
303
304
305
306
307
308
309
310
311
312
313static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
314 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
315{
316 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
317
318 cond_resched();
319 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
320 if (err)
321 return err;
322
323 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
324 snod->offs, new_lnum, new_offs,
325 snod->len);
326 list_del(&snod->list);
327 kfree(snod);
328 return err;
329}
330
/**
 * move_nodes - move all live nodes out of a GC'ed LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB holding the nodes to move
 *
 * Moves every live node of @sleb to the GC journal head, switching the GC
 * head to a fresh LEB whenever the current one fills up.  Data nodes are
 * moved first (in sorted order, without reordering around full LEBs), then
 * non-data nodes using a first-fit strategy.  Returns %0 in case of success
 * and a negative error code in case of failure; on failure the nodes that
 * were not moved are spliced back onto @sleb->nodes so the caller can free
 * them.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC head is not set up yet - presumably this is the
		 * first GC since mount (TODO confirm); switch to the
		 * reserved GC LEB before moving anything.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	/* Split nodes into data/non-data and sort both lists */
	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location, one GC head LEB at a time */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes, preserving the sorted order */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * This node does not fit; do not skip past it
				 * to a smaller one - keeping the sorted order
				 * intact - switch the GC head instead.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes, first-fit */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				/* Nothing on the list can fit any more */
				break;

			if (snod->len > avail) {
				/*
				 * This node does not fit.  Only keep trying
				 * later (smaller) nodes if this one is an
				 * inode node bigger than the minimum inode
				 * node size; for a directory entry or a
				 * minimal inode node, later nodes will be no
				 * smaller, so give up and switch the GC head.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in this LEB and switch to a
		 * fresh GC head LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	/* Hand unmoved nodes back on one list for the caller to destroy */
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438static int gc_sync_wbufs(struct ubifs_info *c)
439{
440 int err, i;
441
442 for (i = 0; i < c->jhead_cnt; i++) {
443 if (i == GCHD)
444 continue;
445 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
446 if (err)
447 return err;
448 }
449 return 0;
450}
451
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * Garbage-collects one LEB.  Returns one of:
 *   o %LEB_FREED     - the LEB was freed and may be reused;
 *   o %LEB_FREED_IDX - an indexing LEB was freed (it can only be reused
 *                      after the next commit);
 *   o %LEB_RETAINED  - the LEB was kept as the new reserved GC LEB
 *                      (@c->gc_lnum);
 *   o a negative error code in case of failure.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	/* A GC LEB must be reserved unless recovery is needed */
	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - the LEB holds no live data at all */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * The LEB has some dirt: a write-buffer may still
			 * hold a node which obsoletes something in this LEB,
			 * so sync the other write-buffers before marking the
			 * whole LEB free and unmapping it.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - keep this one */
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * Scan the LEB to find out what nodes it contains.  Note: the whole
	 * LEB is scanned from offset 0.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		/* An indexing LEB - dirty its index nodes in the TNC */
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		/* Remember this LEB so it is unmapped at commit time */
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Mark the LEB free (keeping the %LPROPS_INDEX flag) so that
		 * the space accounting reflects the GC, while the LEB itself
		 * stays unavailable until after the next commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		/* A data LEB - move all its live nodes to the GC head */
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/*
		 * Publish which LEB was GC'ed before bumping the GC sequence
		 * number; readers of @c->gc_seq rely on this ordering (hence
		 * the write barriers).
		 */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - retain this one */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* Bump the GC sequence number even on failure - nodes were moved */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
641{
642 int i, err, ret, min_space = c->dead_wm;
643 struct ubifs_lprops lp;
644 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
645
646 ubifs_assert_cmt_locked(c);
647 ubifs_assert(!c->ro_media && !c->ro_mount);
648
649 if (ubifs_gc_should_commit(c))
650 return -EAGAIN;
651
652 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
653
654 if (c->ro_error) {
655 ret = -EROFS;
656 goto out_unlock;
657 }
658
659
660 ubifs_assert(!wbuf->used);
661
662 for (i = 0; ; i++) {
663 int space_before, space_after;
664
665 cond_resched();
666
667
668 if (ubifs_gc_should_commit(c)) {
669 ret = -EAGAIN;
670 break;
671 }
672
673 if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
674
675
676
677
678 dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
679 ubifs_commit_required(c);
680 ret = -EAGAIN;
681 break;
682 }
683
684 if (i > HARD_LEBS_LIMIT) {
685
686
687
688
689 dbg_gc("hard limit, -ENOSPC");
690 ret = -ENOSPC;
691 break;
692 }
693
694
695
696
697
698
699
700
701 ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
702 if (ret) {
703 if (ret == -ENOSPC)
704 dbg_gc("no more dirty LEBs");
705 break;
706 }
707
708 dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
709 lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
710 min_space);
711
712 space_before = c->leb_size - wbuf->offs - wbuf->used;
713 if (wbuf->lnum == -1)
714 space_before = 0;
715
716 ret = ubifs_garbage_collect_leb(c, &lp);
717 if (ret < 0) {
718 if (ret == -EAGAIN) {
719
720
721
722
723
724
725 err = ubifs_return_leb(c, lp.lnum);
726 if (err)
727 ret = err;
728 break;
729 }
730 goto out;
731 }
732
733 if (ret == LEB_FREED) {
734
735 dbg_gc("LEB %d freed, return", lp.lnum);
736 ret = lp.lnum;
737 break;
738 }
739
740 if (ret == LEB_FREED_IDX) {
741
742
743
744
745
746
747 dbg_gc("indexing LEB %d freed, continue", lp.lnum);
748 continue;
749 }
750
751 ubifs_assert(ret == LEB_RETAINED);
752 space_after = c->leb_size - wbuf->offs - wbuf->used;
753 dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
754 space_after - space_before);
755
756 if (space_after > space_before) {
757
758 min_space >>= 1;
759 if (min_space < c->dead_wm)
760 min_space = c->dead_wm;
761 continue;
762 }
763
764 dbg_gc("did not make progress");
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782 if (i < SOFT_LEBS_LIMIT) {
783 dbg_gc("try again");
784 continue;
785 }
786
787 min_space <<= 1;
788 if (min_space > c->dark_wm)
789 min_space = c->dark_wm;
790 dbg_gc("set min. space to %d", min_space);
791 }
792
793 if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
794 dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
795 ubifs_commit_required(c);
796 ret = -EAGAIN;
797 }
798
799 err = ubifs_wbuf_sync_nolock(wbuf);
800 if (!err)
801 err = ubifs_leb_unmap(c, c->gc_lnum);
802 if (err) {
803 ret = err;
804 goto out;
805 }
806out_unlock:
807 mutex_unlock(&wbuf->io_mutex);
808 return ret;
809
810out:
811 ubifs_assert(ret < 0);
812 ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
813 ubifs_wbuf_sync_nolock(wbuf);
814 ubifs_ro_mode(c, ret);
815 mutex_unlock(&wbuf->io_mutex);
816 ubifs_return_leb(c, lp.lnum);
817 return ret;
818}
819
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * Unmaps freeable non-index LEBs and records freeable index LEBs on
 * @c->idx_gc so they can be unmapped once the commit completes.  Returns %0
 * in case of success and a negative error code in case of failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs.  NOTE(review): presumably the
	 * write-buffers were synchronized before this is called, otherwise
	 * unmapping could lose nodes that obsolete data in these LEBs -
	 * verify against the commit path.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		/* Mark the whole LEB free so it leaves the freeable set */
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/*
		 * Take the LEB (so nothing else uses it) and clear its index
		 * flag; it will be released after the next commit.
		 */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
903
904
905
906
907
908
909
910int ubifs_gc_end_commit(struct ubifs_info *c)
911{
912 struct ubifs_gced_idx_leb *idx_gc, *tmp;
913 struct ubifs_wbuf *wbuf;
914 int err = 0;
915
916 wbuf = &c->jheads[GCHD].wbuf;
917 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
918 list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
919 if (idx_gc->unmap) {
920 dbg_gc("LEB %d", idx_gc->lnum);
921 err = ubifs_leb_unmap(c, idx_gc->lnum);
922 if (err)
923 goto out;
924 err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
925 LPROPS_NC, 0, LPROPS_TAKEN, -1);
926 if (err)
927 goto out;
928 list_del(&idx_gc->list);
929 kfree(idx_gc);
930 }
931out:
932 mutex_unlock(&wbuf->io_mutex);
933 return err;
934}
935#endif
936
937
938
939
940
941
942
943
944void ubifs_destroy_idx_gc(struct ubifs_info *c)
945{
946 while (!list_empty(&c->idx_gc)) {
947 struct ubifs_gced_idx_leb *idx_gc;
948
949 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
950 list);
951 c->idx_gc_cnt -= 1;
952 list_del(&idx_gc->list);
953 kfree(idx_gc);
954 }
955}
956#ifndef __UBOOT__
957
958
959
960
961
962
963int ubifs_get_idx_gc_leb(struct ubifs_info *c)
964{
965 struct ubifs_gced_idx_leb *idx_gc;
966 int lnum;
967
968 if (list_empty(&c->idx_gc))
969 return -ENOSPC;
970 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
971 lnum = idx_gc->lnum;
972
973 list_del(&idx_gc->list);
974 kfree(idx_gc);
975 return lnum;
976}
977#endif
978