1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_mount.h"
27#include "xfs_da_format.h"
28#include "xfs_da_btree.h"
29#include "xfs_inode.h"
30#include "xfs_trans.h"
31#include "xfs_log.h"
32#include "xfs_log_priv.h"
33#include "xfs_log_recover.h"
34#include "xfs_inode_item.h"
35#include "xfs_extfree_item.h"
36#include "xfs_trans_priv.h"
37#include "xfs_alloc.h"
38#include "xfs_ialloc.h"
39#include "xfs_quota.h"
40#include "xfs_cksum.h"
41#include "xfs_trace.h"
42#include "xfs_icache.h"
43#include "xfs_bmap_btree.h"
44#include "xfs_error.h"
45#include "xfs_dir2.h"
46
/* Midpoint of two log block numbers; used by the binary searches below. */
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

/* Determine whether (part of) the log is zeroed, i.e. never written. */
STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
/* Overwrite stale blocks beyond the head with old-cycle dummy records. */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
67
68
69
70
71
/*
 * One cancelled-buffer record.  Tracks a buffer range seen with a cancel
 * flag during recovery so that replay of stale copies of that buffer can
 * be skipped.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;	/* starting block number */
	uint			bc_len;		/* length (in blocks) */
	int			bc_refcount;	/* reference count */
	struct list_head	bc_list;	/* list linkage */
};
78
79
80
81
82
83
84
85
86
87
88
89static inline int
90xlog_buf_bbcount_valid(
91 struct xlog *log,
92 int bbcount)
93{
94 return bbcount > 0 && bbcount <= log->l_logBBsize;
95}
96
97
98
99
100
101
/*
 * Allocate an uncached buffer of at least nbblks basic blocks for log
 * I/O.  Returns NULL (after warning) on a bogus size or allocation
 * failure; the buffer is returned unlocked.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of
	 * the basic block size), so we round up the requested size to
	 * accommodate the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned
	 * block offset, in which case an I/O of the requested size could
	 * extend beyond the end of the buffer.  If the requested size is
	 * only 1 basic block it will never straddle a sector boundary,
	 * so this won't be an issue.  Nor will this be a problem if the
	 * log I/O is done in basic blocks (sector size 1).  But otherwise
	 * we extend the buffer by one extra log sector to ensure there's
	 * space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
141
/* Release a buffer obtained from xlog_get_bp(). */
STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}
148
149
150
151
152
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers whole log sectors, so the caller's
 * (possibly unaligned) blk_no may sit at a non-zero byte offset into it.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
165
166
167
168
169
/*
 * Read nbblks basic blocks of the log starting at blk_no into bp,
 * widening the request to whole log sectors.  The caller must use
 * xlog_align() to locate its data, since the in-buffer offset of the
 * requested block may be non-zero after the round_down below.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	/* widen the I/O to whole log sectors */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
202
203STATIC int
204xlog_bread(
205 struct xlog *log,
206 xfs_daddr_t blk_no,
207 int nbblks,
208 struct xfs_buf *bp,
209 char **offset)
210{
211 int error;
212
213 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
214 if (error)
215 return error;
216
217 *offset = xlog_align(log, blk_no, nbblks, bp);
218 return 0;
219}
220
221
222
223
224
/*
 * Read log blocks directly into the caller-supplied memory at @offset
 * rather than into bp's own backing memory.  bp is temporarily re-pointed
 * at @offset for the duration of the I/O and always restored afterwards,
 * even if the read itself failed.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)	/* destination memory */
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
249
250
251
252
253
254
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is held and locked across the write (and released again via
 * xfs_buf_relse) so the caller's reference survives.  This can only be
 * used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	/* widen the I/O to whole log sectors */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}
289
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
307
308
309
310
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * Refuse a dirty log whose format field doesn't match ours (e.g.
	 * one written by a different OS/architecture), and a dirty log
	 * whose uuid doesn't match the superblock's.  Either way recovery
	 * could corrupt the filesystem, so fail with EFSCORRUPTED.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
340
341
342
343
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * A nil uuid is tolerated: per the warning text below this
		 * is an IRIX-style log, which did not stamp the fs uuid
		 * into log record headers.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
367
/*
 * I/O completion handler for buffers written during recovery.  Any error
 * forces a filesystem shutdown: recovery does not retry failed writes.
 */
STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying this during
		 * recovery.  One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	/* clear ourselves so the normal ioend path runs just once */
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
386
387
388
389
390
391
392
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,	/* known to have cycle != @cycle */
	xfs_daddr_t	*last_blk,	/* in: known to have @cycle; out: answer */
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
427
428
429
430
431
432
433
434
435
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		/* scan each basic block of this chunk for the stop cycle */
		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
492
493
494
495
496
497
498
499
500
501
502
503
504
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * Returns 0 on success, 1 if no record header was found before start_blk
 * (caller may retry at the physical end of the log), or a negative errno.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;	/* fell back to 1-block reads */
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	/* try to read the whole range in one go; fall back to per-block */
	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	/* walk backwards from *last_blk - 1 looking for a record header */
	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of 1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head.  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
601
602
603
604
605
606
607
608
609
610
611
612
613
614
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle
	 * number, then the entire log is stamped with the same cycle number.
	 * In this case, head_blk can't be set to zero (which makes things
	 * funny because we'd really want the head somewhere in the middle).
	 * Otherwise the first half of the log is stamped with a larger
	 * cycle number than the last half, and the binary search below can
	 * find the transition point.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of
		 * the log, as one of the latest writes at the beginning was
		 * incomplete.  The combination of both is also possible and
		 * the head ends up at the start of the x-1 hole at the end
		 * of the log.  So we set head_blk to the end of the log and
		 * scan the whole thing for cycle x-1.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary
		 * search for the first occurrence of last_half_cycle.  The
		 * binary search may not be totally accurate, so then we
		 * scan back from there looking for occurrences of
		 * last_half_cycle before us.
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.  If we find one, then we know that
		 * the log starts there, as we've found a hole that didn't
		 * get written in going around the end of the physical log.
		 * The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle
		 * number last_half_cycle, then we check the blocks at the
		 * start of the log looking for occurrences of
		 * last_half_cycle.  If we find one, then our current
		 * estimate for the location of the first occurrence of
		 * last_half_cycle is wrong and we move back to the hole
		 * we've found.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't
		 * find the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
871
872
873
874
875
876
877
878
879
/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided
 * number of records or hit the provided tail block.  The return value is
 * the number of records encountered or a negative error code.  The log
 * block and buffer pointer of the last record seen are returned in rblk
 * and rhead respectively.  *wrapped is set if the search crossed the
 * physical start of the log.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the
	 * first block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log.  Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
944
945
946
947
948
949
950
951
952
953
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we
 * find the provided number of records or hit the head block.  The return
 * value is the number of records encountered or a negative error code.
 * The log block and buffer pointer of the last record seen are returned
 * in rblk and rhead respectively.  *wrapped is set if the search crossed
 * the physical end of the log.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the
	 * last block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
1017
1018
1019
1020
1021
1022
1023
1024
1025
/*
 * Check the log tail for torn writes.  This is required when torn writes
 * are detected at the head and the head had to be walked back to a
 * previous record.  The tail of the previous record must now be verified
 * to ensure the torn writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to
	 * get a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head.  tmp_head points to the start of the
	 * record so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail.  We need to verify that
	 * these records were completely written.  Run a CRC verification
	 * pass from tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
/*
 * Detect and trim torn writes at the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes
 * in the log in the event of a crash.  Our only means to detect this
 * scenario is via CRC verification.  If a CRC failure is found within
 * the range of the last possible in-flight records, the head is walked
 * back to the last good record and the tail re-derived from it.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Search backwards from the head for up to XLOG_MAX_ICLOGS record
	 * headers — the maximum number of record I/Os that could have been
	 * in flight at once.  Use a temporary buffer so we don't trash the
	 * rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write.  Reset the error and
		 * warn about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* no good record before the bad one */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first
		 * bad log record and set the tail block from the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match, as this indicates
		 * possible corruption outside of the acceptable range — a
		 * job for xfs_repair rather than for us.
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head.  This is
		 * required because the torn writes trimmed from the head
		 * could have been written over the tail of a previous
		 * record.
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}
1199
1200
1201
1202
1203
1204
/*
 * Check whether the head of the log points to an unmount record.  In
 * other words, determine whether the log is clean.  If so, update the
 * in-core log state (tail/last-sync LSNs and *tail_blk) appropriately
 * and set *clean.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Work out how many header blocks the record occupies: version 2
	 * logs with a header size larger than one cycle's worth spread
	 * the record header over multiple blocks; otherwise it's one.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	/*
	 * The block after the record could be the last block in the
	 * physical log, so convert to a log block before comparing to
	 * the head.
	 */
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
1277
/*
 * Reset in-core log state to match the log as found on disk.
 */
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325STATIC int
1326xlog_find_tail(
1327 struct xlog *log,
1328 xfs_daddr_t *head_blk,
1329 xfs_daddr_t *tail_blk)
1330{
1331 xlog_rec_header_t *rhead;
1332 char *offset = NULL;
1333 xfs_buf_t *bp;
1334 int error;
1335 xfs_daddr_t rhead_blk;
1336 xfs_lsn_t tail_lsn;
1337 bool wrapped = false;
1338 bool clean = false;
1339
1340
1341
1342
1343 if ((error = xlog_find_head(log, head_blk)))
1344 return error;
1345 ASSERT(*head_blk < INT_MAX);
1346
1347 bp = xlog_get_bp(log, 1);
1348 if (!bp)
1349 return -ENOMEM;
1350 if (*head_blk == 0) {
1351 error = xlog_bread(log, 0, 1, bp, &offset);
1352 if (error)
1353 goto done;
1354
1355 if (xlog_get_cycle(offset) == 0) {
1356 *tail_blk = 0;
1357
1358 goto done;
1359 }
1360 }
1361
1362
1363
1364
1365
1366
1367 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
1368 &rhead_blk, &rhead, &wrapped);
1369 if (error < 0)
1370 return error;
1371 if (!error) {
1372 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1373 return -EIO;
1374 }
1375 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1376
1377
1378
1379
1380 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1381 tail_lsn = atomic64_read(&log->l_tail_lsn);
1382
1383
1384
1385
1386
1387 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1388 rhead_blk, bp, &clean);
1389 if (error)
1390 goto done;
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402 if (!clean) {
1403 xfs_daddr_t orig_head = *head_blk;
1404
1405 error = xlog_verify_head(log, head_blk, tail_blk, bp,
1406 &rhead_blk, &rhead, &wrapped);
1407 if (error)
1408 goto done;
1409
1410
1411 if (*head_blk != orig_head) {
1412 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1413 wrapped);
1414 tail_lsn = atomic64_read(&log->l_tail_lsn);
1415 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1416 rhead, rhead_blk, bp,
1417 &clean);
1418 if (error)
1419 goto done;
1420 }
1421 }
1422
1423
1424
1425
1426
1427
1428 if (clean)
1429 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1451 error = xlog_clear_stale_blocks(log, tail_lsn);
1452
1453done:
1454 xlog_put_bp(bp);
1455
1456 if (error)
1457 xfs_warn(log->l_mp, "failed to locate log tail");
1458 return error;
1459}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
/*
 * Is the log zeroed at all?
 *
 * A zeroed (never written) region at the end of the device indicates a
 * fresh or partially written log.
 *
 * Return values:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1.  If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the
	 * maximum is not chosen to mean anything special.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}
1574
1575
1576
1577
1578
1579
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
1601
/*
 * Write "blocks" dummy log record headers (one per basic block), starting
 * at start_block, all stamped with the given cycle and pointing at the
 * given tail.  Handles sector alignment by reading back the partial
 * sectors at either end before writing.
 */
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		/* skip the pre-existing blocks in the first sector */
		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the ending sector not covered by the write.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from the log so that we
 * will not write over the unmount record after a clean unmount in a
 * 512 block log.  Doing so would leave the log without any valid log
 * records in it until a new one was written.  If we crashed during that
 * time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
/*
 * Sort the log items in this transaction into four classes and re-link them
 * onto trans->r_itemq in the order they must be replayed:
 *
 *   1. buffer_list       - ordinary buffer and inode-create items, first
 *   2. inode_list        - inode, dquot, quotaoff, EFI and EFD items
 *   3. inode_buffer_list - inode buffers (di_next_unlinked updates), which
 *                          must be replayed after the inode items
 *   4. cancel_list       - cancelled buffers, appended last
 *
 * Returns 0 on success, or -EIO if an unrecognized item type is found; in
 * that case the still-unsorted items are spliced back onto the transaction
 * so the caller can free them.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	/* Pull every item off the transaction so we can classify them. */
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		/* Only dereferenced for XFS_LI_BUF items below. */
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * Return the remaining items back to the transaction
			 * item list so they can be freed in the caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	/* Rebuild r_itemq: buffers, inodes, inode buffers, then cancels. */
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958STATIC int
1959xlog_recover_buffer_pass1(
1960 struct xlog *log,
1961 struct xlog_recover_item *item)
1962{
1963 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1964 struct list_head *bucket;
1965 struct xfs_buf_cancel *bcp;
1966
1967
1968
1969
1970 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1971 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1972 return 0;
1973 }
1974
1975
1976
1977
1978
1979 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1980 list_for_each_entry(bcp, bucket, bc_list) {
1981 if (bcp->bc_blkno == buf_f->blf_blkno &&
1982 bcp->bc_len == buf_f->blf_len) {
1983 bcp->bc_refcount++;
1984 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1985 return 0;
1986 }
1987 }
1988
1989 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1990 bcp->bc_blkno = buf_f->blf_blkno;
1991 bcp->bc_len = buf_f->blf_len;
1992 bcp->bc_refcount = 1;
1993 list_add_tail(&bcp->bc_list, bucket);
1994
1995 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1996 return 0;
1997}
1998
1999
2000
2001
2002
2003
2004STATIC struct xfs_buf_cancel *
2005xlog_peek_buffer_cancelled(
2006 struct xlog *log,
2007 xfs_daddr_t blkno,
2008 uint len,
2009 ushort flags)
2010{
2011 struct list_head *bucket;
2012 struct xfs_buf_cancel *bcp;
2013
2014 if (!log->l_buf_cancel_table) {
2015
2016 ASSERT(!(flags & XFS_BLF_CANCEL));
2017 return NULL;
2018 }
2019
2020 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2021 list_for_each_entry(bcp, bucket, bc_list) {
2022 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2023 return bcp;
2024 }
2025
2026
2027
2028
2029
2030 ASSERT(!(flags & XFS_BLF_CANCEL));
2031 return NULL;
2032}
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044STATIC int
2045xlog_check_buffer_cancelled(
2046 struct xlog *log,
2047 xfs_daddr_t blkno,
2048 uint len,
2049 ushort flags)
2050{
2051 struct xfs_buf_cancel *bcp;
2052
2053 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2054 if (!bcp)
2055 return 0;
2056
2057
2058
2059
2060
2061
2062
2063 if (flags & XFS_BLF_CANCEL) {
2064 if (--bcp->bc_refcount == 0) {
2065 list_del(&bcp->bc_list);
2066 kmem_free(bcp);
2067 }
2068 }
2069 return 1;
2070}
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on-disk inode structures.  The rest of
 * the inode data is logged through the inode items themselves and is
 * recovered in xlog_recover_inode_pass2().
 *
 * Returns 0 on success (including "nothing left to copy") or -EFSCORRUPTED
 * if the log record contains a zero di_next_unlinked value.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;	/* index into item->ri_buf[] */
	int			bit = 0;	/* current bit in blf_data_map */
	int			nbits = 0;	/* length of current logged region */
	int			reg_buf_offset = 0; /* byte offset of region in buffer */
	int			reg_buf_bytes = 0;  /* byte length of region */
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post-recovery validation only works properly on CRC-enabled
	 * filesystems, so only attach the verifier there.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		/* byte offset of this inode's di_next_unlinked field */
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond the
			 * current logged region.  Find the next logged
			 * region that contains or is beyond it.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, move on to the next inode's
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it into the buffer.  A zero value is never
		 * valid (NULLAGINO marks the end of an unlinked list).
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode.
		 * We have to leave the inode in a consistent state for
		 * whoever reads it next.
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));

	}

	return 0;
}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209static xfs_lsn_t
2210xlog_recover_get_buf_lsn(
2211 struct xfs_mount *mp,
2212 struct xfs_buf *bp)
2213{
2214 __uint32_t magic32;
2215 __uint16_t magic16;
2216 __uint16_t magicda;
2217 void *blk = bp->b_addr;
2218 uuid_t *uuid;
2219 xfs_lsn_t lsn = -1;
2220
2221
2222 if (!xfs_sb_version_hascrc(&mp->m_sb))
2223 goto recover_immediately;
2224
2225 magic32 = be32_to_cpu(*(__be32 *)blk);
2226 switch (magic32) {
2227 case XFS_ABTB_CRC_MAGIC:
2228 case XFS_ABTC_CRC_MAGIC:
2229 case XFS_ABTB_MAGIC:
2230 case XFS_ABTC_MAGIC:
2231 case XFS_IBT_CRC_MAGIC:
2232 case XFS_IBT_MAGIC: {
2233 struct xfs_btree_block *btb = blk;
2234
2235 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2236 uuid = &btb->bb_u.s.bb_uuid;
2237 break;
2238 }
2239 case XFS_BMAP_CRC_MAGIC:
2240 case XFS_BMAP_MAGIC: {
2241 struct xfs_btree_block *btb = blk;
2242
2243 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2244 uuid = &btb->bb_u.l.bb_uuid;
2245 break;
2246 }
2247 case XFS_AGF_MAGIC:
2248 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2249 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2250 break;
2251 case XFS_AGFL_MAGIC:
2252 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2253 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2254 break;
2255 case XFS_AGI_MAGIC:
2256 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2257 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2258 break;
2259 case XFS_SYMLINK_MAGIC:
2260 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2261 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2262 break;
2263 case XFS_DIR3_BLOCK_MAGIC:
2264 case XFS_DIR3_DATA_MAGIC:
2265 case XFS_DIR3_FREE_MAGIC:
2266 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2267 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2268 break;
2269 case XFS_ATTR3_RMT_MAGIC:
2270
2271
2272
2273
2274
2275
2276
2277 goto recover_immediately;
2278 case XFS_SB_MAGIC:
2279
2280
2281
2282
2283
2284
2285
2286 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2287 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2288 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2289 else
2290 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2291 break;
2292 default:
2293 break;
2294 }
2295
2296 if (lsn != (xfs_lsn_t)-1) {
2297 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2298 goto recover_immediately;
2299 return lsn;
2300 }
2301
2302 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2303 switch (magicda) {
2304 case XFS_DIR3_LEAF1_MAGIC:
2305 case XFS_DIR3_LEAFN_MAGIC:
2306 case XFS_DA3_NODE_MAGIC:
2307 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2308 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2309 break;
2310 default:
2311 break;
2312 }
2313
2314 if (lsn != (xfs_lsn_t)-1) {
2315 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2316 goto recover_immediately;
2317 return lsn;
2318 }
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331 magic16 = be16_to_cpu(*(__be16 *)blk);
2332 switch (magic16) {
2333 case XFS_DQUOT_MAGIC:
2334 case XFS_DINODE_MAGIC:
2335 goto recover_immediately;
2336 default:
2337 break;
2338 }
2339
2340
2341
2342recover_immediately:
2343 return (xfs_lsn_t)-1;
2344
2345}
2346
2347
2348
2349
2350
2351
2352
2353
2354
/*
 * Attach the correct write verifier (b_ops) to a recovered buffer based on
 * the buffer type recorded in the log item flags, cross-checked against the
 * on-disk magic number.  A mismatch is flagged with a warning/ASSERT and
 * leaves b_ops unset rather than attaching the wrong verifier.
 *
 * Only done on CRC-enabled (v5) filesystems: pre-v5 logs do not reliably
 * record buffer types, so attaching verifiers there could misfire.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written out
	 * to be able to determine if we should have replayed the item.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	/* read all three magic encodings once; only one will be relevant */
	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_FIBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		default:
			xfs_warn(mp, "Bad btree block magic!");
			ASSERT(0);
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			xfs_warn(mp, "Bad AGF block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			xfs_warn(mp, "Bad AGFL block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			xfs_warn(mp, "Bad AGI block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			xfs_warn(mp, "Bad DQUOT block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		/* quota-disabled kernels cannot verify dquot buffers */
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			xfs_warn(mp, "Bad INODE block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			xfs_warn(mp, "Bad symlink block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			xfs_warn(mp, "Bad dir block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			xfs_warn(mp, "Bad dir data magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			xfs_warn(mp, "Bad dir3 free magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			xfs_warn(mp, "Bad dir leaf1 magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			xfs_warn(mp, "Bad dir leafn magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			xfs_warn(mp, "Bad da node magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			xfs_warn(mp, "Bad attr leaf magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			xfs_warn(mp, "Bad attr remote magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			xfs_warn(mp, "Bad SB block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}
}
2553
2554
2555
2556
2557
2558
2559
/*
 * Perform a 'normal' buffer recovery.  Each logged region of the buffer
 * (described by the dirty bitmap in the buf log format item) should be
 * copied over the corresponding region in the given buffer.  The bitmap in
 * the buf log format structure indicates where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			bit;
	int			nbits;
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		/* find the start of the next logged region */
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f);
}
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650STATIC bool
2651xlog_recover_do_dquot_buffer(
2652 struct xfs_mount *mp,
2653 struct xlog *log,
2654 struct xlog_recover_item *item,
2655 struct xfs_buf *bp,
2656 struct xfs_buf_log_format *buf_f)
2657{
2658 uint type;
2659
2660 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2661
2662
2663
2664
2665 if (!mp->m_qflags)
2666 return false;
2667
2668 type = 0;
2669 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2670 type |= XFS_DQ_USER;
2671 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2672 type |= XFS_DQ_PROJ;
2673 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2674 type |= XFS_DQ_GROUP;
2675
2676
2677
2678 if (log->l_quotaoffs_flag & type)
2679 return false;
2680
2681 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2682 return true;
2683}
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
/*
 * Pass 2 handler for buffer log items: replay the logged regions into the
 * on-disk buffer.
 *
 * Cancelled buffers are skipped.  On v5 filesystems the buffer's stamped
 * LSN is compared against the transaction's LSN and replay is skipped if
 * the on-disk copy is already newer.  Inode buffers and dquot buffers get
 * their specialised handlers; everything else goes through the regular
 * bitmap-driven copy.  The dirtied buffer is normally queued for delayed
 * write; see the inode-buffer special case at the bottom.
 */
STATIC int
xlog_recover_buffer_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
		return 0;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, NULL);
	if (!bp)
		return -ENOMEM;
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
		goto out_release;
	}

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers so if we don't
	 * actually do any replay of the buffer the buffer won't have a
	 * verifier attached. If it is skipped here, attach the verifier
	 * explicitly so subsequent reads validate the buffer correctly.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		xlog_recover_validate_buf_type(mp, bp, buf_f);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of one block
	 * or m_inode_cluster_size bytes, whichever is bigger.  A buffer with
	 * a magic number of DINODE and a mismatched size is written
	 * immediately and marked stale so it is not kept cached with the
	 * wrong size.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_target->bt_mount == mp);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
}
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
/*
 * Replay a deferred bmap btree owner change for an inode whose log item
 * carries XFS_ILOG_DOWNER and/or XFS_ILOG_AOWNER.
 *
 * A throwaway in-core inode is built from the on-disk inode (bypassing the
 * cache) purely so xfs_bmbt_change_owner() can walk the fork's btree and
 * rewrite block ownership.  The inode is always freed again before return.
 *
 * Returns 0 on success or a negative errno from fork setup / btree walk.
 */
STATIC int
xfs_recover_inode_owner_change(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_format *in_f,
	struct list_head	*buffer_list)
{
	struct xfs_inode	*ip;
	int			error;

	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));

	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
	if (!ip)
		return -ENOMEM;

	/* instantiate the inode from the on-disk copy */
	xfs_inode_from_disk(ip, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)
		goto out_free_ip;

	/* data fork owner change requires the data btree root be logged */
	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

out_free_ip:
	xfs_inode_free(ip);
	return error;
}
2895
/*
 * Pass 2 handler for inode log items: replay the logged inode core and
 * forks into the on-disk inode buffer.
 *
 * Replay is skipped if the inode buffer was cancelled, if (on v5) the
 * on-disk inode's LSN shows it is already newer than this transaction, or
 * if (on v4) the flush iteration counter shows the disk copy is more
 * recent.  Extensive sanity checks on the logged inode return
 * -EFSCORRUPTED rather than writing garbage to disk.
 */
STATIC int
xlog_recover_inode_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	xfs_dinode_t		*dip;
	int			len;
	char			*src;
	char			*dest;
	int			error;
	int			attr_index;
	uint			fields;
	struct xfs_log_dinode	*ldip;
	uint			isize;
	int			need_free = 0;

	/* convert 32-bit logged formats to the native format if needed */
	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
		in_f = item->ri_buf[0].i_addr;
	} else {
		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}

	/*
	 * Inode buffers can be freed, look out for it,
	 * and do not replay the inode.
	 */
	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
					in_f->ilf_len, 0)) {
		error = 0;
		trace_xfs_log_recover_inode_cancel(log, in_f);
		goto error;
	}
	trace_xfs_log_recover_inode_recover(log, in_f);

	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
			  &xfs_inode_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto error;
	}
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
		goto out_release;
	}
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = xfs_buf_offset(bp, in_f->ilf_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
		xfs_alert(mp,
	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
			__func__, dip, bp, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	ldip = item->ri_buf[1].i_addr;
	if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
		xfs_alert(mp,
			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
			__func__, item, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/*
	 * If the inode has an LSN in it, recover the inode only if it's less
	 * than the lsn of the transaction we are replaying. Note: we still
	 * need to replay an owner change even though the inode is more recent
	 * than the transaction as there is no guarantee that all the btree
	 * blocks are more recent than this transaction, too.
	 */
	if (dip->di_version >= 3) {
		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_owner_change;
		}
	}

	/*
	 * di_flushiter is only valid for v1/2 inodes. All changes for v3
	 * inodes are transactional and the ordering is determined by the
	 * inode's LSN above.  If the flush iteration count on disk is newer
	 * than the log copy, skip replay - unless the counter is near the
	 * wrap point, where a lower log value actually means newer.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* do nothing */
		} else {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_release;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	ldip->di_flushiter = 0;

	if (unlikely(S_ISREG(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					 XFS_ERRLEVEL_LOW, mp, ldip);
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, ldip);
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	}
	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			ldip->di_nextents + ldip->di_anextents,
			ldip->di_nblocks);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_log_dinode_size(ldip->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, ldip);
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr 0x%p",
			__func__, item->ri_buf[1].i_len, item);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/* recover the log dinode inode into the on disk inode */
	xfs_log_dinode_to_disk(ldip, dip);

	/*
	 * NOTE(review): unreachable given the i_len > isize check above
	 * rejected longer records; kept verbatim.
	 */
	if (item->ri_buf[1].i_len > isize) {
		memcpy((char *)dip + isize,
			item->ri_buf[1].i_addr + isize,
			item->ri_buf[1].i_len - isize);
	}

	fields = in_f->ilf_fields;
	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
	case XFS_ILOG_DEV:
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
		break;
	case XFS_ILOG_UUID:
		memcpy(XFS_DFORK_DPTR(dip),
		       &in_f->ilf_u.ilfu_uuid,
		       sizeof(uuid_t));
		break;
	}

	/* no data/attr fork regions logged - core only */
	if (in_f->ilf_size == 2)
		goto out_owner_change;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it.  There may or
	 * may not have been any other non-core data logged in this
	 * transaction.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t*)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			error = -EIO;
			goto out_release;
		}
	}

out_owner_change:
	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
		error = xfs_recover_inode_owner_change(mp, dip, in_f,
						       buffer_list);
	/* re-generate the checksum and queue the dirty inode buffer */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return error;
}
3187
3188
3189
3190
3191
3192
3193STATIC int
3194xlog_recover_quotaoff_pass1(
3195 struct xlog *log,
3196 struct xlog_recover_item *item)
3197{
3198 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3199 ASSERT(qoff_f);
3200
3201
3202
3203
3204
3205 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3206 log->l_quotaoffs_flag |= XFS_DQ_USER;
3207 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3208 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3209 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3210 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3211
3212 return 0;
3213}
3214
3215
3216
3217
/*
 * Pass 2 handler for dquot log items: replay the logged dquot image into
 * the on-disk dquot buffer.
 *
 * Replay is skipped when quotas are off on this mount, when a later
 * QUOTAOFF covers this quota type, or (on v5) when the on-disk dquot's
 * LSN shows it is already newer than this transaction.
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0)
		return 0;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return -EIO;
	}
	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return -EIO;
	}

	/*
	 * This type of quotas was turned off, so ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return 0;

	/*
	 * At this point we know that quota was _not_ turned off.
	 * Although there was no dquot buffer logged for this dquot in the
	 * transaction, we must "sanity check" the logged disk dquot copy
	 * before inserting its values into the on-disk buffer.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return -EIO;
	ASSERT(dq_f->qlf_len == 1);

	/*
	 * At this point we are assuming that the dquots have been allocated
	 * and hence the buffer has valid dquots stamped in it. It should,
	 * therefore, pass verifier validation. If the dquot is bad, then the
	 * we'll return an error here, so we don't need to specifically check
	 * the dquot in the buffer after the verifier has run.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		return error;

	ASSERT(bp);
	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * If the dquot has an LSN in it, recover the dquot only if it's less
	 * than the lsn of the transaction we are replaying.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			goto out_release;
		}
	}

	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
	return 0;
}
3320
3321
3322
3323
3324
3325
3326
3327
/*
 * This routine is called to create an in-core extent free intent item from
 * the efi format structure which was logged on disk.  It allocates an
 * in-core efi, copies the extents from the format structure into it, and
 * adds the efi to the AIL with the given LSN.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_efi_log_item		*efip;
	struct xfs_efi_log_format	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
	if (error) {
		xfs_efi_item_free(efip);
		return error;
	}
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * The EFI has two references. One for the EFD and one for EFI to
	 * ensure it makes it into the AIL. Insert the EFI into the AIL
	 * directly and drop the EFI reference. Note that
	 * xfs_trans_ail_update() drops the AIL lock — presumably, as no
	 * unlock appears here; TODO confirm against xfs_trans_priv.h.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	xfs_efi_release(efip);
	return 0;
}
3360
3361
3362
3363
3364
3365
3366
3367
3368
/*
 * Process an extent-free done (EFD) item in pass 2.  An EFD cancels the
 * corresponding EFI: search the AIL for an EFI with a matching id and, if
 * found, drop the reference that keeps it there.  An EFD without a matching
 * EFI is silently ignored (the EFI may already have been processed).
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	__uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
	/* The logged buffer must match either the 32-bit or 64-bit layout. */
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Walk the AIL looking for the EFI this EFD completes.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * Drop the AIL lock while releasing the EFI:
				 * the release path may take it again (e.g.
				 * to remove the item from the AIL).
				 */
				spin_unlock(&ailp->xa_lock);
				xfs_efi_release(efip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425STATIC int
3426xlog_recover_do_icreate_pass2(
3427 struct xlog *log,
3428 struct list_head *buffer_list,
3429 xlog_recover_item_t *item)
3430{
3431 struct xfs_mount *mp = log->l_mp;
3432 struct xfs_icreate_log *icl;
3433 xfs_agnumber_t agno;
3434 xfs_agblock_t agbno;
3435 unsigned int count;
3436 unsigned int isize;
3437 xfs_agblock_t length;
3438 int blks_per_cluster;
3439 int bb_per_cluster;
3440 int cancel_count;
3441 int nbufs;
3442 int i;
3443
3444 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3445 if (icl->icl_type != XFS_LI_ICREATE) {
3446 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3447 return -EINVAL;
3448 }
3449
3450 if (icl->icl_size != 1) {
3451 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3452 return -EINVAL;
3453 }
3454
3455 agno = be32_to_cpu(icl->icl_ag);
3456 if (agno >= mp->m_sb.sb_agcount) {
3457 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3458 return -EINVAL;
3459 }
3460 agbno = be32_to_cpu(icl->icl_agbno);
3461 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3462 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3463 return -EINVAL;
3464 }
3465 isize = be32_to_cpu(icl->icl_isize);
3466 if (isize != mp->m_sb.sb_inodesize) {
3467 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3468 return -EINVAL;
3469 }
3470 count = be32_to_cpu(icl->icl_count);
3471 if (!count) {
3472 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3473 return -EINVAL;
3474 }
3475 length = be32_to_cpu(icl->icl_length);
3476 if (!length || length >= mp->m_sb.sb_agblocks) {
3477 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3478 return -EINVAL;
3479 }
3480
3481
3482
3483
3484
3485 if (length != mp->m_ialloc_blks &&
3486 length != mp->m_ialloc_min_blks) {
3487 xfs_warn(log->l_mp,
3488 "%s: unsupported chunk length", __FUNCTION__);
3489 return -EINVAL;
3490 }
3491
3492
3493 if ((count >> mp->m_sb.sb_inopblog) != length) {
3494 xfs_warn(log->l_mp,
3495 "%s: inconsistent inode count and chunk length",
3496 __FUNCTION__);
3497 return -EINVAL;
3498 }
3499
3500
3501
3502
3503
3504
3505
3506 blks_per_cluster = xfs_icluster_size_fsb(mp);
3507 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3508 nbufs = length / blks_per_cluster;
3509 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3510 xfs_daddr_t daddr;
3511
3512 daddr = XFS_AGB_TO_DADDR(mp, agno,
3513 agbno + i * blks_per_cluster);
3514 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3515 cancel_count++;
3516 }
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528 ASSERT(!cancel_count || cancel_count == nbufs);
3529 if (cancel_count) {
3530 if (cancel_count != nbufs)
3531 xfs_warn(mp,
3532 "WARNING: partial inode chunk cancellation, skipped icreate.");
3533 trace_xfs_log_recover_icreate_cancel(log, icl);
3534 return 0;
3535 }
3536
3537 trace_xfs_log_recover_icreate_recover(log, icl);
3538 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3539 length, be32_to_cpu(icl->icl_gen));
3540}
3541
3542STATIC void
3543xlog_recover_buffer_ra_pass2(
3544 struct xlog *log,
3545 struct xlog_recover_item *item)
3546{
3547 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3548 struct xfs_mount *mp = log->l_mp;
3549
3550 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3551 buf_f->blf_len, buf_f->blf_flags)) {
3552 return;
3553 }
3554
3555 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3556 buf_f->blf_len, NULL);
3557}
3558
3559STATIC void
3560xlog_recover_inode_ra_pass2(
3561 struct xlog *log,
3562 struct xlog_recover_item *item)
3563{
3564 struct xfs_inode_log_format ilf_buf;
3565 struct xfs_inode_log_format *ilfp;
3566 struct xfs_mount *mp = log->l_mp;
3567 int error;
3568
3569 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3570 ilfp = item->ri_buf[0].i_addr;
3571 } else {
3572 ilfp = &ilf_buf;
3573 memset(ilfp, 0, sizeof(*ilfp));
3574 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3575 if (error)
3576 return;
3577 }
3578
3579 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3580 return;
3581
3582 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3583 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3584}
3585
3586STATIC void
3587xlog_recover_dquot_ra_pass2(
3588 struct xlog *log,
3589 struct xlog_recover_item *item)
3590{
3591 struct xfs_mount *mp = log->l_mp;
3592 struct xfs_disk_dquot *recddq;
3593 struct xfs_dq_logformat *dq_f;
3594 uint type;
3595 int len;
3596
3597
3598 if (mp->m_qflags == 0)
3599 return;
3600
3601 recddq = item->ri_buf[1].i_addr;
3602 if (recddq == NULL)
3603 return;
3604 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3605 return;
3606
3607 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3608 ASSERT(type);
3609 if (log->l_quotaoffs_flag & type)
3610 return;
3611
3612 dq_f = item->ri_buf[0].i_addr;
3613 ASSERT(dq_f);
3614 ASSERT(dq_f->qlf_len == 1);
3615
3616 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3617 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3618 return;
3619
3620 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3621 &xfs_dquot_buf_ra_ops);
3622}
3623
3624STATIC void
3625xlog_recover_ra_pass2(
3626 struct xlog *log,
3627 struct xlog_recover_item *item)
3628{
3629 switch (ITEM_TYPE(item)) {
3630 case XFS_LI_BUF:
3631 xlog_recover_buffer_ra_pass2(log, item);
3632 break;
3633 case XFS_LI_INODE:
3634 xlog_recover_inode_ra_pass2(log, item);
3635 break;
3636 case XFS_LI_DQUOT:
3637 xlog_recover_dquot_ra_pass2(log, item);
3638 break;
3639 case XFS_LI_EFI:
3640 case XFS_LI_EFD:
3641 case XFS_LI_QUOTAOFF:
3642 default:
3643 break;
3644 }
3645}
3646
3647STATIC int
3648xlog_recover_commit_pass1(
3649 struct xlog *log,
3650 struct xlog_recover *trans,
3651 struct xlog_recover_item *item)
3652{
3653 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3654
3655 switch (ITEM_TYPE(item)) {
3656 case XFS_LI_BUF:
3657 return xlog_recover_buffer_pass1(log, item);
3658 case XFS_LI_QUOTAOFF:
3659 return xlog_recover_quotaoff_pass1(log, item);
3660 case XFS_LI_INODE:
3661 case XFS_LI_EFI:
3662 case XFS_LI_EFD:
3663 case XFS_LI_DQUOT:
3664 case XFS_LI_ICREATE:
3665
3666 return 0;
3667 default:
3668 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3669 __func__, ITEM_TYPE(item));
3670 ASSERT(0);
3671 return -EIO;
3672 }
3673}
3674
3675STATIC int
3676xlog_recover_commit_pass2(
3677 struct xlog *log,
3678 struct xlog_recover *trans,
3679 struct list_head *buffer_list,
3680 struct xlog_recover_item *item)
3681{
3682 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3683
3684 switch (ITEM_TYPE(item)) {
3685 case XFS_LI_BUF:
3686 return xlog_recover_buffer_pass2(log, buffer_list, item,
3687 trans->r_lsn);
3688 case XFS_LI_INODE:
3689 return xlog_recover_inode_pass2(log, buffer_list, item,
3690 trans->r_lsn);
3691 case XFS_LI_EFI:
3692 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3693 case XFS_LI_EFD:
3694 return xlog_recover_efd_pass2(log, item);
3695 case XFS_LI_DQUOT:
3696 return xlog_recover_dquot_pass2(log, buffer_list, item,
3697 trans->r_lsn);
3698 case XFS_LI_ICREATE:
3699 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3700 case XFS_LI_QUOTAOFF:
3701
3702 return 0;
3703 default:
3704 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3705 __func__, ITEM_TYPE(item));
3706 ASSERT(0);
3707 return -EIO;
3708 }
3709}
3710
3711STATIC int
3712xlog_recover_items_pass2(
3713 struct xlog *log,
3714 struct xlog_recover *trans,
3715 struct list_head *buffer_list,
3716 struct list_head *item_list)
3717{
3718 struct xlog_recover_item *item;
3719 int error = 0;
3720
3721 list_for_each_entry(item, item_list, ri_list) {
3722 error = xlog_recover_commit_pass2(log, trans,
3723 buffer_list, item);
3724 if (error)
3725 return error;
3726 }
3727
3728 return error;
3729}
3730
3731
3732
3733
3734
3735
3736
/*
 * Commit a whole recovered transaction.  In pass 2, items are first pushed
 * through readahead and batched (up to XLOG_RECOVER_COMMIT_QUEUE_MAX) so
 * that buffer reads overlap with replay work; replayed items are collected
 * on done_list and spliced back onto the transaction so the caller can
 * free them uniformly.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	int			error = 0;
	int			error2;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(buffer_list);
	LIST_HEAD		(ra_list);	/* queued for readahead+replay */
	LIST_HEAD		(done_list);	/* already replayed */

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	/* The transaction is being committed; remove it from the hash. */
	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			/*
			 * Kick off readahead, then defer the actual replay
			 * until a batch has accumulated so the reads have
			 * time to complete.
			 */
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						&buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	/* Flush any partial batch left over (skipped if we errored out). */
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					&buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	/* Re-attach processed items so the caller frees everything at once. */
	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	/* Submit queued delwri buffers even on error; report the first error. */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	return error ? error : error2;
}
3799
3800STATIC void
3801xlog_recover_add_item(
3802 struct list_head *head)
3803{
3804 xlog_recover_item_t *item;
3805
3806 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
3807 INIT_LIST_HEAD(&item->ri_list);
3808 list_add_tail(&item->ri_list, head);
3809}
3810
/*
 * Append @len bytes of continuation data to the last region of the last
 * item of @trans.  A continuation arriving while the item queue is still
 * empty must be the tail end of the transaction header itself, which is
 * assembled directly into trans->r_theader.
 */
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xlog_recover_item_t	*item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * Empty queue: this continuation completes the split transaction
	 * header.  Copy into the tail of r_theader (the head was filled by
	 * xlog_recover_add_to_trans).
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EIO;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* Grow the most recent region of the most recent item. */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
/*
 * Add a new region of logged data to @trans.  The first region of a
 * transaction is its xfs_trans_header; subsequent regions are either the
 * start of a new item (whose first region carries the item's log format
 * structure, telling us how many regions to expect) or another region of
 * the current item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* The first region must be the transaction header. */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EIO;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EIO;
		}

		/*
		 * A short header means the rest arrives in a continuation
		 * record; only add a placeholder item once the header is
		 * complete so the continuation path can tell the two cases
		 * apart.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* If the current item already has all its regions, start a new one. */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		/*
		 * ilf_size sits at the same offset in every item's log
		 * format struct, so the inode cast works for all types.
		 */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EIO;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Item takes ownership of ptr; freed in xlog_recover_free_trans(). */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
3944
3945
3946
3947
3948
3949
3950STATIC void
3951xlog_recover_free_trans(
3952 struct xlog_recover *trans)
3953{
3954 xlog_recover_item_t *item, *n;
3955 int i;
3956
3957 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3958
3959 list_del(&item->ri_list);
3960 for (i = 0; i < item->ri_cnt; i++)
3961 kmem_free(item->ri_buf[i].i_addr);
3962
3963 kmem_free(item->ri_buf);
3964 kmem_free(item);
3965 }
3966
3967 kmem_free(trans);
3968}
3969
3970
3971
3972
/*
 * Feed one op-header's worth of data into an in-progress recovery
 * transaction, dispatching on the (normalised) op flags.  The transaction
 * is freed on commit, on unmount records, and on any error.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass)
{
	int			error = 0;
	bool			freeit = false;

	/*
	 * Normalise the flags: END is implicit, and WAS_CONT subsumes
	 * CONTINUE so the switch below sees exactly one state.
	 */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass);
		/* success or fail, we are now done with this transaction */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EIO;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
4026
4027
4028
4029
4030
4031
4032
4033
/*
 * Look up the in-progress recovery transaction for an op header, or start
 * tracking a new one.
 *
 * Returns the existing transaction when the tid is already in the hash.
 * Returns NULL both for an op header that does not belong to any known
 * transaction (ignored by the caller) AND after allocating a brand-new
 * transaction for a START record — a start record carries no payload
 * (oh_len == 0), so there is nothing further for the caller to process.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * Unknown tid and not a start record: the transaction either began
	 * before the region we are recovering or is malformed; skip it.
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * Start a new transaction keyed by this tid, stamped with the lsn
	 * of the record it first appeared in.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * NULL here means "nothing more to do for this op header" — the
	 * start record itself has no data to add.
	 */
	return NULL;
}
4077
/*
 * Validate one op header and route its payload to the owning recovery
 * transaction.  @dp points just past the op header; @end bounds the
 * unpacked record data.
 */
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass)
{
	struct xlog_recover	*trans;
	unsigned int		len;

	/* Only transaction and log-internal client ids are legal. */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EIO;
	}

	/*
	 * The payload must fit inside the record.  NOTE(review): for a
	 * corrupt, huge len the pointer sum could in principle wrap; the
	 * check relies on dp + len not overflowing — confirm acceptable.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EIO;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do: skip stray or just-started transactions */
		return 0;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass);
}
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
/*
 * Walk all op headers in one unpacked log record, processing each payload
 * in turn.  @rhash tracks transactions that span multiple records.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* Reject records written by a foreign/incompatible filesystem. */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	while ((dp < end) && num_logops) {
		/* Each op: fixed header followed by oh_len payload bytes. */
		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		ASSERT(dp <= end);

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
4166
4167
4168
4169
4170
/*
 * Complete a recovered EFI that never saw its EFD: free the extents it
 * describes in a new transaction, logging an EFD as we go.  Obviously
 * bogus extents mark the EFI recovered and fail with -EIO rather than
 * corrupting the filesystem further.
 */
STATIC int
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	int			error = 0;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));

	/*
	 * First pass: sanity-check every extent before touching anything.
	 * A zero start/length or an out-of-range block means the EFI is
	 * garbage; release it and bail.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and free its
			 * memory, so we don't touch it again afterwards.
			 */
			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
			xfs_efi_release(efip);
			return -EIO;
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error)
		goto abort_error;
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	/* Second pass: actually free each extent, recording it in the EFD. */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
					      extp->ext_len);
		if (error)
			goto abort_error;

	}

	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_trans_cancel(tp);
	return error;
}
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
/*
 * After log replay, process every EFI left in the AIL — each one is an
 * extent free whose EFD never hit the log before the crash.  EFIs sort to
 * the front of the AIL (all share the recovery lsn), so the walk stops at
 * the first non-EFI item.
 */
STATIC int
xlog_recover_process_efis(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	struct xfs_efi_log_item	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * Non-EFI means we're past the recovered EFIs; in DEBUG
		 * builds verify that no EFI hides later in the AIL.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		/*
		 * Skip EFIs already marked recovered (e.g. failed the
		 * extent sanity check earlier).
		 */
		efip = container_of(lip, struct xfs_efi_log_item, efi_item);
		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
			lip = xfs_trans_ail_cursor_next(ailp, &cur);
			continue;
		}

		/* Processing runs a transaction; drop the AIL lock around it. */
		spin_unlock(&ailp->xa_lock);
		error = xlog_recover_process_efi(log->l_mp, efip);
		spin_lock(&ailp->xa_lock);
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
4297
4298
4299
4300
4301
/*
 * Abort path counterpart of xlog_recover_process_efis(): when recovery is
 * being cancelled, drop every recovered EFI from the AIL without freeing
 * any extents.  Like the processing path, the walk stops at the first
 * non-EFI item.
 */
STATIC int
xlog_recover_cancel_efis(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	struct xfs_efi_log_item	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * EFIs cluster at the front of the AIL; in DEBUG builds
		 * verify none hide behind the first non-EFI item.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		efip = container_of(lip, struct xfs_efi_log_item, efi_item);

		/* Release may re-take xa_lock; drop it across the call. */
		spin_unlock(&ailp->xa_lock);
		xfs_efi_release(efip);
		spin_lock(&ailp->xa_lock);

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
4341
4342
4343
4344
4345
4346STATIC void
4347xlog_recover_clear_agi_bucket(
4348 xfs_mount_t *mp,
4349 xfs_agnumber_t agno,
4350 int bucket)
4351{
4352 xfs_trans_t *tp;
4353 xfs_agi_t *agi;
4354 xfs_buf_t *agibp;
4355 int offset;
4356 int error;
4357
4358 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
4359 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
4360 if (error)
4361 goto out_abort;
4362
4363 error = xfs_read_agi(mp, tp, agno, &agibp);
4364 if (error)
4365 goto out_abort;
4366
4367 agi = XFS_BUF_TO_AGI(agibp);
4368 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4369 offset = offsetof(xfs_agi_t, agi_unlinked) +
4370 (sizeof(xfs_agino_t) * bucket);
4371 xfs_trans_log_buf(tp, agibp, offset,
4372 (offset + sizeof(xfs_agino_t) - 1));
4373
4374 error = xfs_trans_commit(tp);
4375 if (error)
4376 goto out_error;
4377 return;
4378
4379out_abort:
4380 xfs_trans_cancel(tp);
4381out_error:
4382 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4383 return;
4384}
4385
/*
 * Process a single inode on an AGI unlinked list: grab the inode (letting
 * normal inactivation free it on release) and return the agino of the next
 * list entry, read from the on-disk di_next_unlinked pointer.  On any
 * failure the whole bucket is cleared and NULLAGINO is returned to stop
 * the walk.
 */
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Read the on-disk inode to get the next pointer; the in-core
	 * inode doesn't carry di_next_unlinked.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	/* An unlinked-list inode must be fully unlinked but still allocated. */
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Suppress DMAPI destroy events for this inode; recovery-time
	 * frees must not generate them.
	 */
	ip->i_d.di_dmevmask = 0;

	/* Final reference drop triggers inactivation, freeing the inode. */
	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
/*
 * After replay, walk every AGI's unlinked-inode buckets and finish off
 * the inodes found there (they were unlinked but still open at crash
 * time).  Failures are tolerated: a bad AGI is skipped, a bad list is
 * cleared, and xfs_repair can pick up the pieces later.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Disable DMAPI event generation for the duration of the walk;
	 * restore the saved mask when done.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * If the AGI can't be read, skip this AG rather than
		 * failing recovery; its unlinked inodes are simply leaked
		 * until repaired.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			continue;
		}

		/*
		 * Drop the buffer lock but hold a reference while walking
		 * the buckets: processing an inode may need to lock this
		 * AGI again (e.g. to clear a bucket), and nothing else can
		 * be modifying it during recovery, so the unlocked read of
		 * the bucket heads is safe here.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
4513
4514STATIC int
4515xlog_unpack_data(
4516 struct xlog_rec_header *rhead,
4517 char *dp,
4518 struct xlog *log)
4519{
4520 int i, j, k;
4521
4522 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4523 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4524 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4525 dp += BBSIZE;
4526 }
4527
4528 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4529 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4530 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4531 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4532 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4533 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4534 dp += BBSIZE;
4535 }
4536 }
4537
4538 return 0;
4539}
4540
4541
4542
4543
/*
 * Process one complete log record: verify its CRC per the pass being run,
 * unpack the cycle data, and hand the payload to the op-header walker.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass)
{
	int			error;
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

	/*
	 * CRC-only pass: just report a mismatch (only when the record
	 * actually carries a CRC) so the caller can trim the log head;
	 * don't unpack or replay anything.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (rhead->h_crc && crc != rhead->h_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * Replay passes: a mismatch is fatal only on CRC-enabled (v5)
	 * filesystems.  On older filesystems (or a zero stored CRC, i.e.
	 * written without CRCs) log it and carry on.
	 */
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(rhead->h_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return -EFSCORRUPTED;
	}

	error = xlog_unpack_data(rhead, dp, log);
	if (error)
		return error;

	return xlog_recover_process_data(log, rhash, rhead, dp, pass);
}
4599
4600STATIC int
4601xlog_valid_rec_header(
4602 struct xlog *log,
4603 struct xlog_rec_header *rhead,
4604 xfs_daddr_t blkno)
4605{
4606 int hlen;
4607
4608 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4609 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4610 XFS_ERRLEVEL_LOW, log->l_mp);
4611 return -EFSCORRUPTED;
4612 }
4613 if (unlikely(
4614 (!rhead->h_version ||
4615 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4616 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4617 __func__, be32_to_cpu(rhead->h_version));
4618 return -EIO;
4619 }
4620
4621
4622 hlen = be32_to_cpu(rhead->h_len);
4623 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
4624 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4625 XFS_ERRLEVEL_LOW, log->l_mp);
4626 return -EFSCORRUPTED;
4627 }
4628 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
4629 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4630 XFS_ERRLEVEL_LOW, log->l_mp);
4631 return -EFSCORRUPTED;
4632 }
4633 return 0;
4634}
4635
4636
4637
4638
4639
4640
4641
4642
4643
/*
 * Run one recovery pass over the active log region [tail_blk, head_blk).
 * Handles both the simple case (tail before head) and the wrapped case
 * (tail after head, where records wrap around the physical end of the
 * log — including record headers and bodies individually split across
 * the wrap point).  @pass is passed through to the record processors.
 * On error, *first_bad (if non-NULL) is set to the start block of the
 * record being processed when the error occurred.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);
	rhead_blk = 0;

	/*
	 * Read the header of the tail block and get the iclog buffer size
	 * from h_size.  Use this to tell how many sectors make up the log
	 * header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;

		/*
		 * h_len can legitimately exceed h_size on some filesystems
		 * (a single-op record written against the log stripe unit
		 * rather than the iclog size — NOTE(review): confirm the
		 * exact writer behaviour); accept that case by sizing the
		 * data buffer from the stripe unit, otherwise treat the
		 * oversized length as corruption.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size) {
			if (h_len <= log->l_mp->m_logbsize &&
			    be32_to_cpu(rhead->h_num_logops) == 1) {
				xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
					 h_size, log->l_mp->m_logbsize);
				h_size = log->l_mp->m_logbsize;
			} else
				return -EFSCORRUPTED;
		}

		/* v2 logs with big iclogs need multi-block header buffers. */
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		/* v1 logs: single-block headers, fixed max record size. */
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	blk_no = rhead_blk = tail_blk;
	if (tail_blk > head_blk) {
		/*
		 * The active log wraps: first process everything from the
		 * tail to the physical end of the log, taking care of
		 * headers and record bodies that straddle the wrap point.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log.
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/*
				 * The header wraps: read the portion up to
				 * the end of the log first (if any)...
				 */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * ...then read the remainder from the start
				 * of the log into the tail of the same
				 * buffer, reassembling a contiguous header.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record's data wraps end-of-log. */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Read the wrapped remainder from the start
				 * of the log into the tail of the data
				 * buffer, same as for the header above.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);

	/* Report where the failing record started, for head trimming. */
	if (error && first_bad)
		*first_bad = rhead_blk;

	return error;
}
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909STATIC int
4910xlog_do_log_recovery(
4911 struct xlog *log,
4912 xfs_daddr_t head_blk,
4913 xfs_daddr_t tail_blk)
4914{
4915 int error, i;
4916
4917 ASSERT(head_blk != tail_blk);
4918
4919
4920
4921
4922
4923 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4924 sizeof(struct list_head),
4925 KM_SLEEP);
4926 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4927 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4928
4929 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4930 XLOG_RECOVER_PASS1, NULL);
4931 if (error != 0) {
4932 kmem_free(log->l_buf_cancel_table);
4933 log->l_buf_cancel_table = NULL;
4934 return error;
4935 }
4936
4937
4938
4939
4940 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4941 XLOG_RECOVER_PASS2, NULL);
4942#ifdef DEBUG
4943 if (!error) {
4944 int i;
4945
4946 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4947 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4948 }
4949#endif
4950
4951 kmem_free(log->l_buf_cancel_table);
4952 log->l_buf_cancel_table = NULL;
4953
4954 return error;
4955}
4956
4957
4958
4959
/*
 * Do the recovery of the log.  Replay the log records, then re-read the
 * superblock and reinitialise in-core state that replay may have changed
 * on disk.  Returns 0 on success or a negative errno.
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	struct xfs_mount *mp = log->l_mp;
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	/*
	 * Replay may have updated the on-disk log tail; recompute the
	 * in-core tail lsn before anyone else uses it.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Re-read the superblock from disk: replay may have modified it.
	 * Reset the buffer state flags so we can issue a fresh read, and
	 * attach the sb verifier so the read is checked.
	 */
	bp = xfs_getsb(mp, 0);
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit_wait(bp);
	if (error) {
		/* Only alert if the failure wasn't caused by a shutdown. */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format into the in-core copy. */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* Re-initialise per-cpu counters and per-ag state from the new sb. */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur. */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
5035
5036
5037
5038
5039
5040
/*
 * Perform recovery of the log if it is dirty (head != tail), after
 * verifying that recovery is actually possible on this device and with
 * this kernel.  Returns 0 if the log was clean or recovered, or a
 * negative errno.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* Locate the log head and tail; a mismatch means a dirty log. */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * On v5 (CRC-enabled) filesystems, refuse to recover if the
	 * superblock LSN is ahead of the current log head — that implies
	 * the log has been cleared by something newer than this log.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/*
		 * Recovery writes to the device, so it cannot proceed on a
		 * read-only device.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Refuse to replay a log with incompat features this
		 * kernel does not understand — replaying could corrupt
		 * structures we cannot interpret.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Optional debug knob: delay recovery so tests can inject
		 * faults or observe intermediate state.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		/*
		 * NOTE(review): the flag is set even if xlog_do_recover
		 * failed — presumably so later teardown/cancel paths know
		 * recovery was attempted; confirm against callers.
		 */
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130int
5131xlog_recover_finish(
5132 struct xlog *log)
5133{
5134
5135
5136
5137
5138
5139
5140
5141
5142 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5143 int error;
5144 error = xlog_recover_process_efis(log);
5145 if (error) {
5146 xfs_alert(log->l_mp, "Failed to recover EFIs");
5147 return error;
5148 }
5149
5150
5151
5152
5153
5154
5155 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5156
5157 xlog_recover_process_iunlinks(log);
5158
5159 xlog_recover_check_summary(log);
5160
5161 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5162 log->l_mp->m_logname ? log->l_mp->m_logname
5163 : "internal");
5164 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5165 } else {
5166 xfs_info(log->l_mp, "Ending clean mount");
5167 }
5168 return 0;
5169}
5170
5171int
5172xlog_recover_cancel(
5173 struct xlog *log)
5174{
5175 int error = 0;
5176
5177 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5178 error = xlog_recover_cancel_efis(log);
5179
5180 return error;
5181}
5182
5183#if defined(DEBUG)
5184
5185
5186
5187
5188void
5189xlog_recover_check_summary(
5190 struct xlog *log)
5191{
5192 xfs_mount_t *mp;
5193 xfs_agf_t *agfp;
5194 xfs_buf_t *agfbp;
5195 xfs_buf_t *agibp;
5196 xfs_agnumber_t agno;
5197 __uint64_t freeblks;
5198 __uint64_t itotal;
5199 __uint64_t ifree;
5200 int error;
5201
5202 mp = log->l_mp;
5203
5204 freeblks = 0LL;
5205 itotal = 0LL;
5206 ifree = 0LL;
5207 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5208 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5209 if (error) {
5210 xfs_alert(mp, "%s agf read failed agno %d error %d",
5211 __func__, agno, error);
5212 } else {
5213 agfp = XFS_BUF_TO_AGF(agfbp);
5214 freeblks += be32_to_cpu(agfp->agf_freeblks) +
5215 be32_to_cpu(agfp->agf_flcount);
5216 xfs_buf_relse(agfbp);
5217 }
5218
5219 error = xfs_read_agi(mp, NULL, agno, &agibp);
5220 if (error) {
5221 xfs_alert(mp, "%s agi read failed agno %d error %d",
5222 __func__, agno, error);
5223 } else {
5224 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
5225
5226 itotal += be32_to_cpu(agi->agi_count);
5227 ifree += be32_to_cpu(agi->agi_freecount);
5228 xfs_buf_relse(agibp);
5229 }
5230 }
5231}
5232#endif
5233