1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include "ubifs.h"
36#include <linux/list_sort.h>
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/**
 * struct replay_entry - replay list entry.
 * @lnum: logical eraseblock number of the node
 * @offs: node offset
 * @len: node length
 * @deletion: non-zero if this entry corresponds to a node deletion
 * @sqnum: node sequence number
 * @list: links the replay list
 * @key: node key
 * @nm: directory entry name (valid only for hash keys)
 * @old_size: truncation old size (valid only for truncation entries)
 * @new_size: truncation new size (valid only for truncation entries)
 *
 * The replay process first scans all buds and builds the replay list, then
 * sorts the replay list in sequence number order, and then inserts all the
 * replay entries to the TNC.
 */
struct replay_entry {
	int lnum;
	int offs;
	int len;
	unsigned int deletion:1;
	unsigned long long sqnum;
	struct list_head list;
	union ubifs_key key;
	union {
		struct qstr nm;
		struct {
			loff_t old_size;
			loff_t new_size;
		};
	};
};
71
72
73
74
75
76
77
78
79
/**
 * struct bud_entry - entry in the list of buds to replay.
 * @list: next bud in the list
 * @bud: bud description object
 * @sqnum: reference node sequence number
 * @free: free bytes in the bud (filled in by replay_bud())
 * @dirty: dirty bytes in the bud (filled in by replay_bud())
 */
struct bud_entry {
	struct list_head list;
	struct ubifs_bud *bud;
	unsigned long long sqnum;
	int free;
	int dirty;
};
87
88
89
90
91
92
93
94
95
96
/**
 * set_bud_lprops - set free and dirty space used by a bud.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function makes sure the LEB properties of bud @b are set correctly
 * after the replay. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
{
	const struct ubifs_lprops *lp;
	int err = 0, dirty;

	ubifs_get_lprops(c);

	lp = ubifs_lpt_lookup_dirty(c, b->bud->lnum);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	dirty = lp->dirty;
	if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
		/*
		 * The bud starts at offset zero, which means the LEB must
		 * have been empty when it became a bud, so its properties
		 * should be @lp->free == @c->leb_size and @lp->dirty == 0.
		 * They are not, which presumably means the LEB had been
		 * garbage-collected before it became the bud, with no commit
		 * in between, so lprops still carry the pre-GC values —
		 * TODO(review): confirm against the GC code.
		 *
		 * Free space needs no adjustment because the replay scan
		 * determined the exact amount (recorded in @b->free below).
		 * However, the space reclaimed by the garbage collector —
		 * the whole LEB minus the space that was free — must be
		 * subtracted from the dirty space.
		 */
		dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
			lp->free, lp->dirty);
		dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
			lp->free, lp->dirty);
		dirty -= c->leb_size - lp->free;
		/*
		 * If lprops and the replay agreed perfectly, @dirty would now
		 * be zero. A mismatch is not fatal — report it at debug level
		 * and carry on with the replay-derived values.
		 */
		if (dirty != 0)
			dbg_msg("LEB %d lp: %d free %d dirty "
				"replay: %d free %d dirty", b->bud->lnum,
				lp->free, lp->dirty, b->free, b->dirty);
	}
	lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
			     lp->flags | LPROPS_TAKEN, 0);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	/* Seek the journal head's write-buffer to the end of this bud */
	err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf,
				     b->bud->lnum, c->leb_size - b->free,
				     UBI_SHORTTERM);

out:
	ubifs_release_lprops(c);
	return err;
}
164
165
166
167
168
169
170
171
172static int set_buds_lprops(struct ubifs_info *c)
173{
174 struct bud_entry *b;
175 int err;
176
177 list_for_each_entry(b, &c->replay_buds, list) {
178 err = set_bud_lprops(c, b);
179 if (err)
180 return err;
181 }
182
183 return 0;
184}
185
186
187
188
189
190
191static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
192{
193 unsigned min_blk, max_blk;
194 union ubifs_key min_key, max_key;
195 ino_t ino;
196
197 min_blk = r->new_size / UBIFS_BLOCK_SIZE;
198 if (r->new_size & (UBIFS_BLOCK_SIZE - 1))
199 min_blk += 1;
200
201 max_blk = r->old_size / UBIFS_BLOCK_SIZE;
202 if ((r->old_size & (UBIFS_BLOCK_SIZE - 1)) == 0)
203 max_blk -= 1;
204
205 ino = key_inum(c, &r->key);
206
207 data_key_init(c, &min_key, ino, min_blk);
208 data_key_init(c, &max_key, ino, max_blk);
209
210 return ubifs_tnc_remove_range(c, &min_key, &max_key);
211}
212
213
214
215
216
217
218
219
/**
 * apply_replay_entry - apply a replay entry to the TNC.
 * @c: UBIFS file-system description object
 * @r: replay entry to apply
 *
 * Apply the node described by replay entry @r to the TNC: add it, or remove
 * it if @r->deletion is set. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
{
	int err;

	dbg_mntk(&r->key, "LEB %d:%d len %d deletion %d sqnum %llu key ",
		 r->lnum, r->offs, r->len, r->deletion, r->sqnum);

	/* Record the sequence number of the node being replayed */
	c->replay_sqnum = r->sqnum;

	if (is_hash_key(c, &r->key)) {
		/* Hashed keys (directory/xattr entries) carry a name */
		if (r->deletion)
			err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
		else
			err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
					       r->len, &r->nm);
	} else {
		if (r->deletion)
			switch (key_type(c, &r->key)) {
			case UBIFS_INO_KEY:
			{
				ino_t inum = key_inum(c, &r->key);

				/* Inode deletion removes all the inode's nodes */
				err = ubifs_tnc_remove_ino(c, inum);
				break;
			}
			case UBIFS_TRUN_KEY:
				/* Truncation removes a range of data nodes */
				err = trun_remove_range(c, r);
				break;
			default:
				err = ubifs_tnc_remove(c, &r->key);
				break;
			}
		else
			err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,
					    r->len);
		if (err)
			return err;

		/* During recovery, accumulate inode size information as well */
		if (c->need_recovery)
			err = ubifs_recover_size_accum(c, &r->key, r->deletion,
						       r->new_size);
	}

	return err;
}
266
267
268
269
270
271
272
273
274
275
276
277static int replay_entries_cmp(void *priv, struct list_head *a,
278 struct list_head *b)
279{
280 struct replay_entry *ra, *rb;
281
282 cond_resched();
283 if (a == b)
284 return 0;
285
286 ra = list_entry(a, struct replay_entry, list);
287 rb = list_entry(b, struct replay_entry, list);
288 ubifs_assert(ra->sqnum != rb->sqnum);
289 if (ra->sqnum > rb->sqnum)
290 return 1;
291 return -1;
292}
293
294
295
296
297
298
299
300
301static int apply_replay_list(struct ubifs_info *c)
302{
303 struct replay_entry *r;
304 int err;
305
306 list_sort(c, &c->replay_list, &replay_entries_cmp);
307
308 list_for_each_entry(r, &c->replay_list, list) {
309 cond_resched();
310
311 err = apply_replay_entry(c, r);
312 if (err)
313 return err;
314 }
315
316 return 0;
317}
318
319
320
321
322
323
324
325static void destroy_replay_list(struct ubifs_info *c)
326{
327 struct replay_entry *r, *tmp;
328
329 list_for_each_entry_safe(r, tmp, &c->replay_list, list) {
330 if (is_hash_key(c, &r->key))
331 kfree(r->nm.name);
332 list_del(&r->list);
333 kfree(r);
334 }
335}
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
/**
 * insert_node - insert a node to the replay list.
 * @c: UBIFS file-system description object
 * @lnum: node logical eraseblock number
 * @offs: node offset
 * @len: node length
 * @key: node key
 * @sqnum: sequence number
 * @deletion: non-zero if this is a deletion
 * @used: number of bytes in use in a LEB (updated for non-deletion nodes)
 * @old_size: truncation old size
 * @new_size: truncation new size
 *
 * This function inserts a scanned non-directory-entry node to the replay
 * list. The list is later sorted by sequence number and applied to the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
		       union ubifs_key *key, unsigned long long sqnum,
		       int deletion, int *used, loff_t old_size,
		       loff_t new_size)
{
	struct replay_entry *r;

	dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);

	/* Track the highest inode number seen during replay */
	if (key_inum(c, key) >= c->highest_inum)
		c->highest_inum = key_inum(c, key);

	r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	/* Deleted nodes do not occupy live space in the bud */
	if (!deletion)
		*used += ALIGN(len, 8);
	r->lnum = lnum;
	r->offs = offs;
	r->len = len;
	r->deletion = !!deletion;
	r->sqnum = sqnum;
	key_copy(c, key, &r->key);
	r->old_size = old_size;
	r->new_size = new_size;

	list_add_tail(&r->list, &c->replay_list);
	return 0;
}
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
/**
 * insert_dent - insert a directory entry node into the replay list.
 * @c: UBIFS file-system description object
 * @lnum: node logical eraseblock number
 * @offs: node offset
 * @len: node length
 * @key: node key
 * @name: directory entry name
 * @nlen: directory entry name length
 * @sqnum: sequence number
 * @deletion: non-zero if this is a deletion
 * @used: number of bytes in use in a LEB (updated for non-deletion nodes)
 *
 * This function inserts a scanned directory entry node (or an extended
 * attribute entry node) to the replay list. A private, NUL-terminated copy
 * of the name is made because the scan buffer is reused. Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
		       union ubifs_key *key, const char *name, int nlen,
		       unsigned long long sqnum, int deletion, int *used)
{
	struct replay_entry *r;
	char *nbuf;

	dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
	/* Track the highest inode number seen during replay */
	if (key_inum(c, key) >= c->highest_inum)
		c->highest_inum = key_inum(c, key);

	r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	nbuf = kmalloc(nlen + 1, GFP_KERNEL);
	if (!nbuf) {
		kfree(r);
		return -ENOMEM;
	}

	/* Deleted nodes do not occupy live space in the bud */
	if (!deletion)
		*used += ALIGN(len, 8);
	r->lnum = lnum;
	r->offs = offs;
	r->len = len;
	r->deletion = !!deletion;
	r->sqnum = sqnum;
	key_copy(c, key, &r->key);
	r->nm.len = nlen;
	memcpy(nbuf, name, nlen);
	nbuf[nlen] = '\0';
	r->nm.name = nbuf;

	list_add_tail(&r->list, &c->replay_list);
	return 0;
}
442
443
444
445
446
447
448
449
450
/**
 * ubifs_validate_entry - validate directory or extended attribute entry node.
 * @c: UBIFS file-system description object
 * @dent: the node to validate
 *
 * This function validates directory or extended attribute entry node @dent:
 * length consistency, entry type, name length and NUL termination, and the
 * inode number range. Returns zero if the node is all right and %-EINVAL if
 * not.
 */
int ubifs_validate_entry(struct ubifs_info *c,
			 const struct ubifs_dent_node *dent)
{
	int key_type = key_type_flash(c, dent->key);
	int nlen = le16_to_cpu(dent->nlen);

	if (le32_to_cpu(dent->ch.len) != nlen + UBIFS_DENT_NODE_SZ + 1 ||
	    dent->type >= UBIFS_ITYPES_CNT ||
	    nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
	    strnlen(dent->name, nlen) != nlen ||
	    le64_to_cpu(dent->inum) > MAX_INUM) {
		ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ?
			  "directory entry" : "extended attribute entry");
		return -EINVAL;
	}

	if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
		ubifs_err("bad key type %d", key_type);
		return -EINVAL;
	}

	return 0;
}
474
475
476
477
478
479
480
481
482
483
484
/**
 * is_last_bud - check if the bud is the last in the journal head.
 * @c: UBIFS file-system description object
 * @bud: bud description object
 *
 * This function checks if bud @bud is the last bud in its journal head. This
 * information is then used by the replay code to decide whether the bud can
 * be subjected to the recovery path. Returns %1 if this is the last bud, and
 * %0 if not.
 */
static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct ubifs_jhead *jh = &c->jheads[bud->jhead];
	struct ubifs_bud *next;
	uint32_t data;
	int err;

	if (list_is_last(&bud->list, &jh->buds_list))
		return 1;

	/*
	 * Quirk: a bud which is the next-to-last in the journal head may in
	 * effect be the last one. If the successor bud (which IS last in the
	 * list) has never been written to - its first 4 bytes at the bud's
	 * start offset are still all 0xFF (erased flash) - then @bud is
	 * treated as last too, and may therefore be recovered.
	 *
	 * NOTE(review): this presumably handles images where a reference for
	 * the new bud was logged before the previous bud's write-buffer was
	 * synchronized, so a power cut could corrupt the end of @bud while
	 * leaving its successor empty - TODO confirm against the journal
	 * commit ordering.
	 */
	next = list_entry(bud->list.next, struct ubifs_bud, list);
	if (!list_is_last(&next->list, &jh->buds_list))
		return 0;

	err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1);
	if (err)
		return 0;

	return data == 0xFFFFFFFF;
}
532
533
534
535
536
537
538
539
540
541
/**
 * replay_bud - replay a bud logical eraseblock.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function scans bud @b (recovering it if needed), inserts all its
 * nodes into the replay list, and works out the bud's free and dirty space
 * (stored in @b->free and @b->dirty). Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
{
	int is_last = is_last_bud(c, b->bud);
	int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start;
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;

	dbg_mnt("replay bud LEB %d, head %d, offs %d, is_last %d",
		lnum, b->bud->jhead, offs, is_last);

	if (c->need_recovery && is_last)
		/*
		 * Only the last bud of a journal head may have been affected
		 * by an unclean unmount, so only that one goes through the
		 * recovery path; the rest are scanned normally.
		 */
		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
	else
		sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	/*
	 * Walk all scanned nodes. For every node, @used accumulates the
	 * 8-byte-aligned length of nodes that are still live (non-deletion),
	 * which is later used to compute the bud's dirty space.
	 */
	list_for_each_entry(snod, &sleb->nodes, list) {
		int deletion = 0;

		cond_resched();

		if (snod->sqnum >= SQNUM_WATERMARK) {
			ubifs_err("file system's life ended");
			goto out_dump;
		}

		/* Track the highest sequence number seen */
		if (snod->sqnum > c->max_sqnum)
			c->max_sqnum = snod->sqnum;

		switch (snod->type) {
		case UBIFS_INO_NODE:
		{
			struct ubifs_ino_node *ino = snod->node;
			loff_t new_size = le64_to_cpu(ino->size);

			/* An inode node with zero link count is a deletion */
			if (le32_to_cpu(ino->nlink) == 0)
				deletion = 1;
			err = insert_node(c, lnum, snod->offs, snod->len,
					  &snod->key, snod->sqnum, deletion,
					  &used, 0, new_size);
			break;
		}
		case UBIFS_DATA_NODE:
		{
			struct ubifs_data_node *dn = snod->node;
			/* Size the inode would have if this block is its last */
			loff_t new_size = le32_to_cpu(dn->size) +
					  key_block(c, &snod->key) *
					  UBIFS_BLOCK_SIZE;

			err = insert_node(c, lnum, snod->offs, snod->len,
					  &snod->key, snod->sqnum, deletion,
					  &used, 0, new_size);
			break;
		}
		case UBIFS_DENT_NODE:
		case UBIFS_XENT_NODE:
		{
			struct ubifs_dent_node *dent = snod->node;

			err = ubifs_validate_entry(c, dent);
			if (err)
				goto out_dump;

			/* Zero target inode number means entry deletion */
			err = insert_dent(c, lnum, snod->offs, snod->len,
					  &snod->key, dent->name,
					  le16_to_cpu(dent->nlen), snod->sqnum,
					  !le64_to_cpu(dent->inum), &used);
			break;
		}
		case UBIFS_TRUN_NODE:
		{
			struct ubifs_trun_node *trun = snod->node;
			loff_t old_size = le64_to_cpu(trun->old_size);
			loff_t new_size = le64_to_cpu(trun->new_size);
			union ubifs_key key;

			/* Validate truncation node sizes */
			if (old_size < 0 || old_size > c->max_inode_sz ||
			    new_size < 0 || new_size > c->max_inode_sz ||
			    old_size <= new_size) {
				ubifs_err("bad truncation node");
				goto out_dump;
			}

			/*
			 * Create the truncation key from the inode number;
			 * replayed as a deletion of the truncated data range.
			 */
			trun_key_init(c, &key, le32_to_cpu(trun->inum));
			err = insert_node(c, lnum, snod->offs, snod->len,
					  &key, snod->sqnum, 1, &used,
					  old_size, new_size);
			break;
		}
		default:
			ubifs_err("unexpected node type %d in bud LEB %d:%d",
				  snod->type, lnum, snod->offs);
			err = -EINVAL;
			goto out_dump;
		}
		if (err)
			goto out;
	}

	ubifs_assert(ubifs_search_bud(c, lnum));
	ubifs_assert(sleb->endpt - offs >= used);
	ubifs_assert(sleb->endpt % c->min_io_size == 0);

	/* Dirty = scanned span minus live data; free = rest of the LEB */
	b->dirty = sleb->endpt - offs - used;
	b->free = c->leb_size - sleb->endpt;
	dbg_mnt("bud LEB %d replied: dirty %d, free %d", lnum, b->dirty, b->free);

out:
	ubifs_scan_destroy(sleb);
	return err;

out_dump:
	ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
	dbg_dump_node(c, snod->node);
	ubifs_scan_destroy(sleb);
	return -EINVAL;
}
693
694
695
696
697
698
699
700
701static int replay_buds(struct ubifs_info *c)
702{
703 struct bud_entry *b;
704 int err;
705 unsigned long long prev_sqnum = 0;
706
707 list_for_each_entry(b, &c->replay_buds, list) {
708 err = replay_bud(c, b);
709 if (err)
710 return err;
711
712 ubifs_assert(b->sqnum > prev_sqnum);
713 prev_sqnum = b->sqnum;
714 }
715
716 return 0;
717}
718
719
720
721
722
723static void destroy_bud_list(struct ubifs_info *c)
724{
725 struct bud_entry *b;
726
727 while (!list_empty(&c->replay_buds)) {
728 b = list_entry(c->replay_buds.next, struct bud_entry, list);
729 list_del(&b->list);
730 kfree(b);
731 }
732}
733
734
735
736
737
738
739
740
741
742
743
744
745static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
746 unsigned long long sqnum)
747{
748 struct ubifs_bud *bud;
749 struct bud_entry *b;
750
751 dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead);
752
753 bud = kmalloc(sizeof(struct ubifs_bud), GFP_KERNEL);
754 if (!bud)
755 return -ENOMEM;
756
757 b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL);
758 if (!b) {
759 kfree(bud);
760 return -ENOMEM;
761 }
762
763 bud->lnum = lnum;
764 bud->start = offs;
765 bud->jhead = jhead;
766 ubifs_add_bud(c, bud);
767
768 b->bud = bud;
769 b->sqnum = sqnum;
770 list_add_tail(&b->list, &c->replay_buds);
771
772 return 0;
773}
774
775
776
777
778
779
780
781
782
783
784
785
/**
 * validate_ref - validate a reference node.
 * @c: UBIFS file-system description object
 * @ref: the reference node to validate
 *
 * This function returns %0 if the reference node is valid and new, %1 if a
 * matching bud already exists for the referred LEB (the reference may be
 * skipped), and %-EINVAL if validation failed.
 */
static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
{
	struct ubifs_bud *bud;
	int lnum = le32_to_cpu(ref->lnum);
	unsigned int offs = le32_to_cpu(ref->offs);
	unsigned int jhead = le32_to_cpu(ref->jhead);

	/*
	 * Sanity-check the journal head, LEB range and offset. Note that
	 * @offs == @c->leb_size is deliberately allowed by the '>' check
	 * (a bud may start at the very end of a LEB).
	 */
	if (jhead >= c->jhead_cnt || lnum >= c->leb_cnt ||
	    lnum < c->main_first || offs > c->leb_size ||
	    offs & (c->min_io_size - 1))
		return -EINVAL;

	/* A bud may be referred to twice, but only with a later start offset */
	bud = ubifs_search_bud(c, lnum);
	if (bud) {
		if (bud->jhead == jhead && bud->start <= offs)
			return 1;
		ubifs_err("bud at LEB %d:%d was already referred", lnum, offs);
		return -EINVAL;
	}

	return 0;
}
814
815
816
817
818
819
820
821
822
823
824
825
/**
 * replay_log_leb - replay a log logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: log logical eraseblock to replay
 * @offs: offset to start replaying from
 * @sbuf: scan buffer
 *
 * This function replays a log LEB: it validates the commit start node (when
 * this is the first log LEB), and adds a replay bud for every valid
 * reference node found. Returns zero in case of success, %1 if this log LEB
 * is the last one (no further log LEBs need replaying), and a negative error
 * code in case of failure.
 */
static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
	int err;
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	const struct ubifs_cs_node *node;

	dbg_mnt("replay log LEB %d:%d", lnum, offs);
	sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery);
	if (IS_ERR(sleb)) {
		if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
			return PTR_ERR(sleb);
		/*
		 * The LEB contains corruption and recovery is enabled -
		 * try the dedicated log recovery path instead.
		 */
		sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
		if (IS_ERR(sleb))
			return PTR_ERR(sleb);
	}

	/* An empty log LEB means the end of the log */
	if (sleb->nodes_cnt == 0) {
		err = 1;
		goto out;
	}

	node = sleb->buf;
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
	if (c->cs_sqnum == 0) {
		/*
		 * This is the first log LEB being replayed - it must begin
		 * with a commit start node whose commit number matches the
		 * master node's; its sequence number becomes @c->cs_sqnum.
		 */
		if (snod->type != UBIFS_CS_NODE) {
			dbg_err("first log node at LEB %d:%d is not CS node",
				lnum, offs);
			goto out_dump;
		}
		if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
			dbg_err("first CS node at LEB %d:%d has wrong "
				"commit number %llu expected %llu",
				lnum, offs,
				(unsigned long long)le64_to_cpu(node->cmt_no),
				c->cmt_no);
			goto out_dump;
		}

		c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
		dbg_mnt("commit start sqnum %llu", c->cs_sqnum);
	}

	if (snod->sqnum < c->cs_sqnum) {
		/*
		 * The first node of this LEB pre-dates the commit start -
		 * this LEB belongs to an older (already committed) log, so
		 * stop replaying here.
		 */
		err = 1;
		goto out;
	}

	/* A log LEB must be replayed from its very beginning */
	if (snod->offs != 0) {
		dbg_err("first node is not at zero offset");
		goto out_dump;
	}

	list_for_each_entry(snod, &sleb->nodes, list) {
		cond_resched();

		if (snod->sqnum >= SQNUM_WATERMARK) {
			ubifs_err("file system's life ended");
			goto out_dump;
		}

		/* Nothing older than the commit start may appear past it */
		if (snod->sqnum < c->cs_sqnum) {
			dbg_err("bad sqnum %llu, commit sqnum %llu",
				snod->sqnum, c->cs_sqnum);
			goto out_dump;
		}

		if (snod->sqnum > c->max_sqnum)
			c->max_sqnum = snod->sqnum;

		switch (snod->type) {
		case UBIFS_REF_NODE: {
			const struct ubifs_ref_node *ref = snod->node;

			err = validate_ref(c, ref);
			if (err == 1)
				break; /* Already have this bud */
			if (err)
				goto out_dump;

			err = add_replay_bud(c, le32_to_cpu(ref->lnum),
					     le32_to_cpu(ref->offs),
					     le32_to_cpu(ref->jhead),
					     snod->sqnum);
			if (err)
				goto out;

			break;
		}
		case UBIFS_CS_NODE:
			/* A commit start node is valid only at offset zero */
			if (snod->offs != 0) {
				ubifs_err("unexpected node in log");
				goto out_dump;
			}
			break;
		default:
			ubifs_err("unexpected node in log");
			goto out_dump;
		}
	}

	/* Advance the log head to the end of the scanned region */
	if (sleb->endpt || c->lhead_offs >= c->leb_size) {
		c->lhead_lnum = lnum;
		c->lhead_offs = sleb->endpt;
	}

	/* A fully-empty tail means this was the last log LEB */
	err = !sleb->endpt;
out:
	ubifs_scan_destroy(sleb);
	return err;

out_dump:
	ubifs_err("log error detected while replaying the log at LEB %d:%d",
		  lnum, offs + snod->offs);
	dbg_dump_node(c, snod->node);
	ubifs_scan_destroy(sleb);
	return -EINVAL;
}
965
966
967
968
969
970
971
972
/**
 * take_ihead - update the status of the index head in lprops to 'taken'.
 * @c: UBIFS file-system description object
 *
 * Mark the index head LEB as taken in the LEB properties so that it is not
 * handed out for other purposes. This function returns the amount of free
 * space in the index head LEB or a negative error code.
 */
static int take_ihead(struct ubifs_info *c)
{
	const struct ubifs_lprops *lp;
	int err, free;

	ubifs_get_lprops(c);

	lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	free = lp->free;

	/* Only the LPROPS_TAKEN flag changes; free/dirty stay as-is */
	lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
			     lp->flags | LPROPS_TAKEN, 0);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out;
	}

	err = free;
out:
	ubifs_release_lprops(c);
	return err;
}
999}
1000
1001
1002
1003
1004
1005
1006
1007
1008
/**
 * ubifs_replay_journal - replay journal.
 * @c: UBIFS file-system description object
 *
 * This function scans the journal, replays and cleans it up. It makes sure
 * all memory data structures related to the uncommitted journal are built
 * (dirty TNC tree, bud list, LEB properties, etc). Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_replay_journal(struct ubifs_info *c)
{
	int err, i, lnum, offs, free;

	BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);

	/* Update the status of the index head in lprops to 'taken' */
	free = take_ihead(c);
	if (free < 0)
		return free;

	/* The index head must agree with the recorded free space */
	if (c->ihead_offs != c->leb_size - free) {
		ubifs_err("bad index head LEB %d:%d", c->ihead_lnum,
			  c->ihead_offs);
		return -EINVAL;
	}

	dbg_mnt("start replaying the journal");
	c->replaying = 1;
	lnum = c->ltail_lnum = c->lhead_lnum;
	offs = c->lhead_offs;

	for (i = 0; i < c->log_lebs; i++, lnum++) {
		if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) {
			/*
			 * The log is circular: wrap back to the first log
			 * LEB once the last one has been reached.
			 */
			lnum = UBIFS_LOG_LNUM;
			offs = 0;
		}
		err = replay_log_leb(c, lnum, offs, c->sbuf);
		if (err == 1)
			/* The last log LEB has been replayed */
			break;
		if (err)
			goto out;
		/* Only the first log LEB starts at a non-zero offset */
		offs = 0;
	}

	err = replay_buds(c);
	if (err)
		goto out;

	err = apply_replay_list(c);
	if (err)
		goto out;

	err = set_buds_lprops(c);
	if (err)
		goto out;

	/*
	 * Initialize the estimate of uncommitted index size from the number
	 * of dirty znodes, each accounted at the maximum index node size.
	 * NOTE(review): presumably a pessimistic budgeting estimate - confirm
	 * against the budgeting code.
	 */
	c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt);
	c->bi.uncommitted_idx *= c->max_idx_node_sz;

	ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
	dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
		"highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
		(unsigned long)c->highest_inum);
out:
	destroy_replay_list(c);
	destroy_bud_list(c);
	c->replaying = 0;
	return err;
}
1080