1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#ifdef __UBOOT__
25#include <linux/compat.h>
26#include <linux/err.h>
27#endif
28#include "ubifs.h"
29#include <linux/bug.h>
30#include <linux/list_sort.h>
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
/**
 * struct replay_entry - replay list entry.
 * @lnum: logical eraseblock number of the node
 * @offs: node offset
 * @len: node length
 * @deletion: non-zero if this entry corresponds to a node deletion
 * @sqnum: node sequence number
 * @list: links the replay list
 * @key: node key
 * @nm: directory entry name
 * @old_size: truncation old size
 * @new_size: truncation new size
 *
 * The replay process first scans all buds and builds the replay list, then
 * sorts the replay list in sequence-number order, and then applies all the
 * replay entries to the TNC.
 */
struct replay_entry {
	int lnum;
	int offs;
	int len;
	unsigned int deletion:1;
	unsigned long long sqnum;
	struct list_head list;
	union ubifs_key key;
	union {
		struct qstr nm;
		struct {
			loff_t old_size;
			loff_t new_size;
		};
	};
};
65
66
67
68
69
70
71
72
73
/**
 * struct bud_entry - entry in the list of buds to replay.
 * @list: next bud in the list
 * @bud: bud description object
 * @sqnum: reference node sequence number
 * @free: free bytes in the bud (set after the bud is replayed)
 * @dirty: dirty bytes in the bud (set after the bud is replayed)
 */
struct bud_entry {
	struct list_head list;
	struct ubifs_bud *bud;
	unsigned long long sqnum;
	int free;
	int dirty;
};
81
82
83
84
85
86
87
88
89
90
/**
 * set_bud_lprops - set free and dirty space used by a bud.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function makes sure the LEB properties of bud @b are set correctly
 * after the replay. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
{
	const struct ubifs_lprops *lp;
	int err = 0, dirty;

	ubifs_get_lprops(c);

	lp = ubifs_lpt_lookup_dirty(c, b->bud->lnum);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	dirty = lp->dirty;
	if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
		/*
		 * The LEB was added to the journal with a starting offset of
		 * zero, which means the LEB must have been empty. The LEB
		 * property values should therefore be @lp->free == @c->leb_size
		 * and @lp->dirty == 0, but that is not the case. The reason is
		 * that the LEB had been garbage collected before it became the
		 * bud, and there was no commit in between. The garbage
		 * collector resets the free and dirty space without recording
		 * it anywhere except lprops, so without a commit the on-flash
		 * LPT still carries the stale pre-GC values.
		 *
		 * Compensate by subtracting the non-free part of the LEB
		 * (which the GC had made dirty) from the stale dirty count;
		 * the bud's own replayed dirty/free amounts are added below.
		 */
		dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
			lp->free, lp->dirty);
		dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
			lp->free, lp->dirty);
		dirty -= c->leb_size - lp->free;
		/*
		 * If the accounting were exact, the adjusted dirty space would
		 * now be zero. It may not be, because the journal heads race
		 * with each other; that is not a problem, but it is worth a
		 * debugging message when it happens.
		 */
		if (dirty != 0)
			dbg_mnt("LEB %d lp: %d free %d dirty replay: %d free %d dirty",
				b->bud->lnum, lp->free, lp->dirty, b->free,
				b->dirty);
	}
	lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
			     lp->flags | LPROPS_TAKEN, 0);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	/* Set the journal head's write-buffer back to the end of this bud */
	err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf,
				     b->bud->lnum, c->leb_size - b->free);

out:
	ubifs_release_lprops(c);
	return err;
}
157
158
159
160
161
162
163
164
165static int set_buds_lprops(struct ubifs_info *c)
166{
167 struct bud_entry *b;
168 int err;
169
170 list_for_each_entry(b, &c->replay_buds, list) {
171 err = set_bud_lprops(c, b);
172 if (err)
173 return err;
174 }
175
176 return 0;
177}
178
179
180
181
182
183
184static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
185{
186 unsigned min_blk, max_blk;
187 union ubifs_key min_key, max_key;
188 ino_t ino;
189
190 min_blk = r->new_size / UBIFS_BLOCK_SIZE;
191 if (r->new_size & (UBIFS_BLOCK_SIZE - 1))
192 min_blk += 1;
193
194 max_blk = r->old_size / UBIFS_BLOCK_SIZE;
195 if ((r->old_size & (UBIFS_BLOCK_SIZE - 1)) == 0)
196 max_blk -= 1;
197
198 ino = key_inum(c, &r->key);
199
200 data_key_init(c, &min_key, ino, min_blk);
201 data_key_init(c, &max_key, ino, max_blk);
202
203 return ubifs_tnc_remove_range(c, &min_key, &max_key);
204}
205
206
207
208
209
210
211
212
/**
 * apply_replay_entry - apply a replay entry to the TNC.
 * @c: UBIFS file-system description object
 * @r: replay entry to apply
 *
 * Apply a replay entry to the TNC. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
{
	int err;

	dbg_mntk(&r->key, "LEB %d:%d len %d deletion %d sqnum %llu key ",
		 r->lnum, r->offs, r->len, r->deletion, r->sqnum);

	/* NOTE(review): presumably consumed by lower TNC layers — confirm */
	c->replay_sqnum = r->sqnum;

	if (is_hash_key(c, &r->key)) {
		/* Directory entry / xattr entry keyed by name hash */
		if (r->deletion)
			err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
		else
			err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
					       r->len, &r->nm);
	} else {
		if (r->deletion)
			switch (key_type(c, &r->key)) {
			case UBIFS_INO_KEY:
			{
				ino_t inum = key_inum(c, &r->key);

				err = ubifs_tnc_remove_ino(c, inum);
				break;
			}
			case UBIFS_TRUN_KEY:
				err = trun_remove_range(c, r);
				break;
			default:
				err = ubifs_tnc_remove(c, &r->key);
				break;
			}
		else
			err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,
					    r->len);
		if (err)
			return err;

		/* During recovery, re-accumulate inode size information */
		if (c->need_recovery)
			err = ubifs_recover_size_accum(c, &r->key, r->deletion,
						       r->new_size);
	}

	return err;
}
259
260
261
262
263
264
265
266
267
268
269
270static int replay_entries_cmp(void *priv, struct list_head *a,
271 struct list_head *b)
272{
273 struct replay_entry *ra, *rb;
274
275 cond_resched();
276 if (a == b)
277 return 0;
278
279 ra = list_entry(a, struct replay_entry, list);
280 rb = list_entry(b, struct replay_entry, list);
281 ubifs_assert(ra->sqnum != rb->sqnum);
282 if (ra->sqnum > rb->sqnum)
283 return 1;
284 return -1;
285}
286
287
288
289
290
291
292
293
294static int apply_replay_list(struct ubifs_info *c)
295{
296 struct replay_entry *r;
297 int err;
298
299 list_sort(c, &c->replay_list, &replay_entries_cmp);
300
301 list_for_each_entry(r, &c->replay_list, list) {
302 cond_resched();
303
304 err = apply_replay_entry(c, r);
305 if (err)
306 return err;
307 }
308
309 return 0;
310}
311
312
313
314
315
316
317
318static void destroy_replay_list(struct ubifs_info *c)
319{
320 struct replay_entry *r, *tmp;
321
322 list_for_each_entry_safe(r, tmp, &c->replay_list, list) {
323 if (is_hash_key(c, &r->key))
324 kfree(r->nm.name);
325 list_del(&r->list);
326 kfree(r);
327 }
328}
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
351 union ubifs_key *key, unsigned long long sqnum,
352 int deletion, int *used, loff_t old_size,
353 loff_t new_size)
354{
355 struct replay_entry *r;
356
357 dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
358
359 if (key_inum(c, key) >= c->highest_inum)
360 c->highest_inum = key_inum(c, key);
361
362 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
363 if (!r)
364 return -ENOMEM;
365
366 if (!deletion)
367 *used += ALIGN(len, 8);
368 r->lnum = lnum;
369 r->offs = offs;
370 r->len = len;
371 r->deletion = !!deletion;
372 r->sqnum = sqnum;
373 key_copy(c, key, &r->key);
374 r->old_size = old_size;
375 r->new_size = new_size;
376
377 list_add_tail(&r->list, &c->replay_list);
378 return 0;
379}
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
399 union ubifs_key *key, const char *name, int nlen,
400 unsigned long long sqnum, int deletion, int *used)
401{
402 struct replay_entry *r;
403 char *nbuf;
404
405 dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
406 if (key_inum(c, key) >= c->highest_inum)
407 c->highest_inum = key_inum(c, key);
408
409 r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
410 if (!r)
411 return -ENOMEM;
412
413 nbuf = kmalloc(nlen + 1, GFP_KERNEL);
414 if (!nbuf) {
415 kfree(r);
416 return -ENOMEM;
417 }
418
419 if (!deletion)
420 *used += ALIGN(len, 8);
421 r->lnum = lnum;
422 r->offs = offs;
423 r->len = len;
424 r->deletion = !!deletion;
425 r->sqnum = sqnum;
426 key_copy(c, key, &r->key);
427 r->nm.len = nlen;
428 memcpy(nbuf, name, nlen);
429 nbuf[nlen] = '\0';
430 r->nm.name = nbuf;
431
432 list_add_tail(&r->list, &c->replay_list);
433 return 0;
434}
435
436
437
438
439
440
441
442
443
/**
 * ubifs_validate_entry - validate directory or extended attribute entry node.
 * @c: UBIFS file-system description object
 * @dent: the node to validate
 *
 * This function validates directory or extended attribute entry node @dent.
 * Returns zero if the node is all right and a %-EINVAL if not.
 */
int ubifs_validate_entry(struct ubifs_info *c,
			 const struct ubifs_dent_node *dent)
{
	int key_type = key_type_flash(c, dent->key);
	int nlen = le16_to_cpu(dent->nlen);

	/*
	 * Node length must be the header plus the name plus the terminating
	 * '\0'; the name must be exactly @nlen non-NUL bytes.
	 */
	if (le32_to_cpu(dent->ch.len) != nlen + UBIFS_DENT_NODE_SZ + 1 ||
	    dent->type >= UBIFS_ITYPES_CNT ||
	    nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
	    strnlen(dent->name, nlen) != nlen ||
	    le64_to_cpu(dent->inum) > MAX_INUM) {
		ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ?
			  "directory entry" : "extended attribute entry");
		return -EINVAL;
	}

	if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
		ubifs_err(c, "bad key type %d", key_type);
		return -EINVAL;
	}

	return 0;
}
467
468
469
470
471
472
473
474
475
476
477
/**
 * is_last_bud - check if the bud is the last in the journal head.
 * @c: UBIFS file-system description object
 * @bud: bud description object
 *
 * This function checks if bud @bud is the last bud in its journal head. This
 * information is then used by replay to decide whether the bud may have been
 * corrupted by a power cut. Returns %1 if this is the last bud, and %0 if
 * not.
 */
static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct ubifs_jhead *jh = &c->jheads[bud->jhead];
	struct ubifs_bud *next;
	uint32_t data;
	int err;

	if (list_is_last(&bud->list, &jh->buds_list))
		return 1;

	/*
	 * A bud is also effectively the last one when it is followed by
	 * exactly one more bud which is still completely empty: a power cut
	 * may have happened after the next bud was referenced in the log but
	 * before anything was written to it. In that case the corruption
	 * caused by the power cut would be in @bud, not in @next.
	 *
	 * Detect this by reading the first 4 bytes at the start of @next —
	 * all-0xFF means unwritten (erased) flash, i.e. the bud is empty.
	 * NOTE(review): read errors are deliberately treated as "not last";
	 * the error is not propagated from here.
	 */
	next = list_entry(bud->list.next, struct ubifs_bud, list);
	if (!list_is_last(&next->list, &jh->buds_list))
		return 0;

	err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1);
	if (err)
		return 0;

	return data == 0xFFFFFFFF;
}
525
526
527
528
529
530
531
532
533
534
/**
 * replay_bud - replay a bud logical eraseblock.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function replays bud @b: it scans (or recovers) the bud LEB, adds all
 * the nodes found there to the replay list, and records the resulting free
 * and dirty space in @b. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
{
	int is_last = is_last_bud(c, b->bud);
	int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start;
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;

	dbg_mnt("replay bud LEB %d, head %d, offs %d, is_last %d",
		lnum, b->bud->jhead, offs, is_last);

	if (c->need_recovery && is_last)
		/*
		 * Recover only the last LEBs in the journal heads, because
		 * power cuts may cause corruptions only in those LEBs - only
		 * they could possibly have been written to at the time of the
		 * power cut.
		 */
		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
	else
		sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	/*
	 * Walk every node found in the bud and queue it on the replay list.
	 * The @used accumulator counts the aligned bytes of the non-deletion
	 * nodes added to the replay list; at the end it is used (together
	 * with the scan end point) to compute this bud's dirty and free
	 * space, which set_bud_lprops() later applies to lprops.
	 */
	list_for_each_entry(snod, &sleb->nodes, list) {
		int deletion = 0;

		cond_resched();

		/* Sequence numbers must stay below the watermark */
		if (snod->sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "file system's life ended");
			goto out_dump;
		}

		if (snod->sqnum > c->max_sqnum)
			c->max_sqnum = snod->sqnum;

		switch (snod->type) {
		case UBIFS_INO_NODE:
		{
			struct ubifs_ino_node *ino = snod->node;
			loff_t new_size = le64_to_cpu(ino->size);

			/* Zero link count means the inode was deleted */
			if (le32_to_cpu(ino->nlink) == 0)
				deletion = 1;
			err = insert_node(c, lnum, snod->offs, snod->len,
					  &snod->key, snod->sqnum, deletion,
					  &used, 0, new_size);
			break;
		}
		case UBIFS_DATA_NODE:
		{
			struct ubifs_data_node *dn = snod->node;
			/* Size the data node implies for the inode */
			loff_t new_size = le32_to_cpu(dn->size) +
					  key_block(c, &snod->key) *
					  UBIFS_BLOCK_SIZE;

			err = insert_node(c, lnum, snod->offs, snod->len,
					  &snod->key, snod->sqnum, deletion,
					  &used, 0, new_size);
			break;
		}
		case UBIFS_DENT_NODE:
		case UBIFS_XENT_NODE:
		{
			struct ubifs_dent_node *dent = snod->node;

			err = ubifs_validate_entry(c, dent);
			if (err)
				goto out_dump;

			/* Zero target inode number means entry deletion */
			err = insert_dent(c, lnum, snod->offs, snod->len,
					  &snod->key, dent->name,
					  le16_to_cpu(dent->nlen), snod->sqnum,
					  !le64_to_cpu(dent->inum), &used);
			break;
		}
		case UBIFS_TRUN_NODE:
		{
			struct ubifs_trun_node *trun = snod->node;
			loff_t old_size = le64_to_cpu(trun->old_size);
			loff_t new_size = le64_to_cpu(trun->new_size);
			union ubifs_key key;

			/* Validate truncation node */
			if (old_size < 0 || old_size > c->max_inode_sz ||
			    new_size < 0 || new_size > c->max_inode_sz ||
			    old_size <= new_size) {
				ubifs_err(c, "bad truncation node");
				goto out_dump;
			}

			/*
			 * Create a fake truncation key just to use the same
			 * functions which expect nodes to have keys.
			 */
			trun_key_init(c, &key, le32_to_cpu(trun->inum));
			err = insert_node(c, lnum, snod->offs, snod->len,
					  &key, snod->sqnum, 1, &used,
					  old_size, new_size);
			break;
		}
		default:
			ubifs_err(c, "unexpected node type %d in bud LEB %d:%d",
				  snod->type, lnum, snod->offs);
			err = -EINVAL;
			goto out_dump;
		}
		if (err)
			goto out;
	}

	ubifs_assert(ubifs_search_bud(c, lnum));
	ubifs_assert(sleb->endpt - offs >= used);
	ubifs_assert(sleb->endpt % c->min_io_size == 0);

	/* Replayed-but-unused bytes are dirty; past the end point is free */
	b->dirty = sleb->endpt - offs - used;
	b->free = c->leb_size - sleb->endpt;
	dbg_mnt("bud LEB %d replied: dirty %d, free %d",
		lnum, b->dirty, b->free);

out:
	ubifs_scan_destroy(sleb);
	return err;

out_dump:
	ubifs_err(c, "bad node is at LEB %d:%d", lnum, snod->offs);
	ubifs_dump_node(c, snod->node);
	ubifs_scan_destroy(sleb);
	return -EINVAL;
}
687
688
689
690
691
692
693
694
695static int replay_buds(struct ubifs_info *c)
696{
697 struct bud_entry *b;
698 int err;
699 unsigned long long prev_sqnum = 0;
700
701 list_for_each_entry(b, &c->replay_buds, list) {
702 err = replay_bud(c, b);
703 if (err)
704 return err;
705
706 ubifs_assert(b->sqnum > prev_sqnum);
707 prev_sqnum = b->sqnum;
708 }
709
710 return 0;
711}
712
713
714
715
716
717static void destroy_bud_list(struct ubifs_info *c)
718{
719 struct bud_entry *b;
720
721 while (!list_empty(&c->replay_buds)) {
722 b = list_entry(c->replay_buds.next, struct bud_entry, list);
723 list_del(&b->list);
724 kfree(b);
725 }
726}
727
728
729
730
731
732
733
734
735
736
737
738
739static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
740 unsigned long long sqnum)
741{
742 struct ubifs_bud *bud;
743 struct bud_entry *b;
744
745 dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead);
746
747 bud = kmalloc(sizeof(struct ubifs_bud), GFP_KERNEL);
748 if (!bud)
749 return -ENOMEM;
750
751 b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL);
752 if (!b) {
753 kfree(bud);
754 return -ENOMEM;
755 }
756
757 bud->lnum = lnum;
758 bud->start = offs;
759 bud->jhead = jhead;
760 ubifs_add_bud(c, bud);
761
762 b->bud = bud;
763 b->sqnum = sqnum;
764 list_add_tail(&b->list, &c->replay_buds);
765
766 return 0;
767}
768
769
770
771
772
773
774
775
776
777
778
779
780static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
781{
782 struct ubifs_bud *bud;
783 int lnum = le32_to_cpu(ref->lnum);
784 unsigned int offs = le32_to_cpu(ref->offs);
785 unsigned int jhead = le32_to_cpu(ref->jhead);
786
787
788
789
790
791
792 if (jhead >= c->jhead_cnt || lnum >= c->leb_cnt ||
793 lnum < c->main_first || offs > c->leb_size ||
794 offs & (c->min_io_size - 1))
795 return -EINVAL;
796
797
798 bud = ubifs_search_bud(c, lnum);
799 if (bud) {
800 if (bud->jhead == jhead && bud->start <= offs)
801 return 1;
802 ubifs_err(c, "bud at LEB %d:%d was already referred", lnum, offs);
803 return -EINVAL;
804 }
805
806 return 0;
807}
808
809
810
811
812
813
814
815
816
817
818
819
/**
 * replay_log_leb - replay a log logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: log logical eraseblock to replay
 * @offs: offset to start replaying from
 * @sbuf: scan buffer
 *
 * This function replays a log LEB and returns zero in case of success, %1 if
 * this is the last LEB in the log, and a negative error code in case of
 * failure.
 */
static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
	int err;
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	const struct ubifs_cs_node *node;

	dbg_mnt("replay log LEB %d:%d", lnum, offs);
	sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery);
	if (IS_ERR(sleb)) {
		if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
			return PTR_ERR(sleb);
		/*
		 * The scan hit a corruption while we are recovering from an
		 * unclean shutdown - try to recover this log LEB (which is
		 * only possible for the last one, since power cuts corrupt
		 * only the tail of the log).
		 */
		sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
		if (IS_ERR(sleb))
			return PTR_ERR(sleb);
	}

	/* An empty LEB means we have reached the end of the log */
	if (sleb->nodes_cnt == 0) {
		err = 1;
		goto out;
	}

	node = sleb->buf;
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
	if (c->cs_sqnum == 0) {
		/*
		 * This is the first log LEB we look at. It must start with a
		 * commit start node carrying the expected commit number; its
		 * sequence number becomes the commit start sequence number,
		 * against which all further log nodes are checked.
		 */
		if (snod->type != UBIFS_CS_NODE) {
			ubifs_err(c, "first log node at LEB %d:%d is not CS node",
				  lnum, offs);
			goto out_dump;
		}
		if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
			ubifs_err(c, "first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
				  lnum, offs,
				  (unsigned long long)le64_to_cpu(node->cmt_no),
				  c->cmt_no);
			goto out_dump;
		}

		c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
		dbg_mnt("commit start sqnum %llu", c->cs_sqnum);
	}

	if (snod->sqnum < c->cs_sqnum) {
		/*
		 * This LEB starts with a node older than the commit start
		 * node, so it belongs to a previous commit - we have reached
		 * the end of the log.
		 */
		err = 1;
		goto out;
	}

	/* The first node has to sit at the very beginning of the LEB */
	if (snod->offs != 0) {
		ubifs_err(c, "first node is not at zero offset");
		goto out_dump;
	}

	list_for_each_entry(snod, &sleb->nodes, list) {
		cond_resched();

		if (snod->sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "file system's life ended");
			goto out_dump;
		}

		/* Nodes older than the commit start may not appear here */
		if (snod->sqnum < c->cs_sqnum) {
			ubifs_err(c, "bad sqnum %llu, commit sqnum %llu",
				  snod->sqnum, c->cs_sqnum);
			goto out_dump;
		}

		if (snod->sqnum > c->max_sqnum)
			c->max_sqnum = snod->sqnum;

		switch (snod->type) {
		case UBIFS_REF_NODE: {
			const struct ubifs_ref_node *ref = snod->node;

			err = validate_ref(c, ref);
			if (err == 1)
				break; /* Already have this bud */
			if (err)
				goto out_dump;

			err = add_replay_bud(c, le32_to_cpu(ref->lnum),
					     le32_to_cpu(ref->offs),
					     le32_to_cpu(ref->jhead),
					     snod->sqnum);
			if (err)
				goto out;

			break;
		}
		case UBIFS_CS_NODE:
			/* Make sure it sits at the beginning of the LEB */
			if (snod->offs != 0) {
				ubifs_err(c, "unexpected node in log");
				goto out_dump;
			}
			break;
		default:
			ubifs_err(c, "unexpected node in log");
			goto out_dump;
		}
	}

	/* Advance the log head to the end of the scanned region */
	if (sleb->endpt || c->lhead_offs >= c->leb_size) {
		c->lhead_lnum = lnum;
		c->lhead_offs = sleb->endpt;
	}

	err = !sleb->endpt; /* An empty end point also ends the log */
out:
	ubifs_scan_destroy(sleb);
	return err;

out_dump:
	ubifs_err(c, "log error detected while replaying the log at LEB %d:%d",
		  lnum, offs + snod->offs);
	ubifs_dump_node(c, snod->node);
	ubifs_scan_destroy(sleb);
	return -EINVAL;
}
958
959
960
961
962
963
964
965
/**
 * take_ihead - update the status of the index head in lprops to 'taken'.
 * @c: UBIFS file-system description object
 *
 * This function returns the amount of free space in the index head LEB or a
 * negative error code.
 */
static int take_ihead(struct ubifs_info *c)
{
	const struct ubifs_lprops *lp;
	int err, free;

	ubifs_get_lprops(c);

	lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	free = lp->free;

	/* Mark the LEB taken; free/dirty are left unchanged (LPROPS_NC) */
	lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
			     lp->flags | LPROPS_TAKEN, 0);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	err = free;
out:
	ubifs_release_lprops(c);
	return err;
}
993
994
995
996
997
998
999
1000
1001
/**
 * ubifs_replay_journal - replay journal.
 * @c: UBIFS file-system description object
 *
 * This function scans the journal, replays and cleans it up. It makes sure
 * all memory data structures related to the uncommitted journal are built
 * (dirty TNC tree, bud lprops, etc). Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_replay_journal(struct ubifs_info *c)
{
	int err, lnum, free;

	BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);

	/* Update the status of the index head in lprops to 'taken' */
	free = take_ihead(c);
	if (free < 0)
		return free; /* Error code */

	/* The index head must match the recorded amount of free space */
	if (c->ihead_offs != c->leb_size - free) {
		ubifs_err(c, "bad index head LEB %d:%d", c->ihead_lnum,
			  c->ihead_offs);
		return -EINVAL;
	}

	dbg_mnt("start replaying the journal");
	c->replaying = 1;
	lnum = c->ltail_lnum = c->lhead_lnum;

	/* Walk the log, starting at the head, until its end is found */
	do {
		err = replay_log_leb(c, lnum, 0, c->sbuf);
		if (err == 1) {
			if (lnum != c->lhead_lnum)
				/* We hit the end of the log */
				break;

			/*
			 * The head of the log must always start with the
			 * "commit start" node on a properly formatted UBIFS.
			 * But we found no nodes at all, which means that
			 * something went wrong and we cannot proceed mounting
			 * the file-system.
			 */
			ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
				  lnum, 0);
			err = -EINVAL;
		}
		if (err)
			goto out;
		lnum = ubifs_next_log_lnum(c, lnum);
	} while (lnum != c->ltail_lnum);

	err = replay_buds(c);
	if (err)
		goto out;

	err = apply_replay_list(c);
	if (err)
		goto out;

	err = set_buds_lprops(c);
	if (err)
		goto out;

	/*
	 * UBIFS budgeting calculations use @c->bi.uncommitted_idx variable
	 * to roughly estimate index growth. Things like @c->bi.min_idx_lebs
	 * depend on it. This means we have to initialize it to make sure
	 * budgeting works properly.
	 */
	c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt);
	c->bi.uncommitted_idx *= c->max_idx_node_sz;

	ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
	dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu",
		c->lhead_lnum, c->lhead_offs, c->max_sqnum,
		(unsigned long)c->highest_inum);
out:
	destroy_replay_list(c);
	destroy_bud_list(c);
	c->replaying = 0;
	return err;
}
1077