1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h"
28#include "xfs_error.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h"
32#include "xfs_btree.h"
33#include "xfs_dinode.h"
34#include "xfs_inode.h"
35#include "xfs_inode_item.h"
36#include "xfs_alloc.h"
37#include "xfs_ialloc.h"
38#include "xfs_log_priv.h"
39#include "xfs_buf_item.h"
40#include "xfs_log_recover.h"
41#include "xfs_extfree_item.h"
42#include "xfs_trans_priv.h"
43#include "xfs_quota.h"
44#include "xfs_utils.h"
45#include "xfs_cksum.h"
46#include "xfs_trace.h"
47#include "xfs_icache.h"
48#include "xfs_icreate_item.h"
49
50
51#include "xfs_symlink.h"
52#include "xfs_da_btree.h"
53#include "xfs_dir2_format.h"
54#include "xfs_dir2_priv.h"
55#include "xfs_attr_leaf.h"
56#include "xfs_attr_remote.h"
57
/*
 * Forward declarations for recovery helpers defined later in this file.
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog	*);
#else
/* Compiles to nothing on non-DEBUG builds. */
#define	xlog_recover_check_summary(log)
#endif
73
74
75
76
77
/*
 * This structure records buf log items that have been cancelled during
 * the first pass of log recovery and therefore must not be replayed in
 * the second pass.  Entries are kept on the per-log hash table
 * (log->l_buf_cancel_table), bucketed by block number.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;	/* disk block number of buffer */
	uint			bc_len;		/* buffer length, in basic blocks */
	int			bc_refcount;	/* times the cancel was logged */
	struct list_head	bc_list;	/* hash bucket linkage */
};
84
85
86
87
88
89
90
91
92
93
94
95static inline int
96xlog_buf_bbcount_valid(
97 struct xlog *log,
98 int bbcount)
99{
100 return bbcount > 0 && bbcount <= log->l_logBBsize;
101}
102
103
104
105
106
107
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to
 * map to a range of nbblks basic blocks at any valid (basic block)
 * offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of
	 * the basic block size), so we round up the requested size to
	 * accommodate the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned
	 * block offset, in which case an I/O of the requested size could
	 * extend beyond the end of the buffer.  If the requested size is
	 * only 1 basic block it will never straddle a sector boundary, so
	 * this won't be an issue.  Nor will this be a problem if the log
	 * I/O is done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure there's
	 * space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);	/* returned unlocked; recovery is single threaded */
	return bp;
}
147
/* Release a buffer obtained from xlog_get_bp(). */
STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}
154
155
156
157
158
159STATIC xfs_caddr_t
160xlog_align(
161 struct xlog *log,
162 xfs_daddr_t blk_no,
163 int nbblks,
164 struct xfs_buf *bp)
165{
166 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
167
168 ASSERT(offset + nbblks <= bp->b_length);
169 return bp->b_addr + BBTOB(offset);
170}
171
172
173
174
175
/*
 * Read nbblks basic blocks starting at log block blk_no into bp,
 * expanding the range to whole log sectors as required by the device.
 * The caller must use xlog_align() to find the data within the buffer.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	/* log I/O must be sector aligned and sector sized */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	/* issue the read and wait synchronously for completion */
	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
209
210STATIC int
211xlog_bread(
212 struct xlog *log,
213 xfs_daddr_t blk_no,
214 int nbblks,
215 struct xfs_buf *bp,
216 xfs_caddr_t *offset)
217{
218 int error;
219
220 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
221 if (error)
222 return error;
223
224 *offset = xlog_align(log, blk_no, nbblks, bp);
225 return 0;
226}
227
228
229
230
231
/*
 * Read at an offset into the buffer.  Returns with the buffer in its
 * original state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	/* temporarily point the buffer at the target memory region */
	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
256
257
258
259
260
261
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	/* log I/O must be sector aligned and sector sized */
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	/* hold + lock so xfs_buf_relse() below leaves the buffer usable */
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}
297
#ifdef DEBUG
/*
 * Dump the superblock and log record header uuid/format pairs when a
 * header check fails, to aid debugging.
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
315
316
317
318
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
348
349
350
351
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields.  If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
375
/*
 * I/O completion handler for buffers written during recovery: any error
 * shuts the filesystem down rather than retrying.
 */
STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}
392
393
394
395
396
397
398
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,	/* in: search end; out: first block of cycle */
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	/*
	 * Invariant: blocks at/after end_blk carry @cycle, blocks at/before
	 * first_blk carry an earlier cycle.  Bisect until adjacent.
	 */
	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
433
434
435
436
437
438
439
440
441
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		/* last chunk may be shorter than bufblks */
		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
498
499
500
501
502
503
504
505
506
507
508
509
510
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;	/* 1 => one-block buffer, re-read per block */
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		/* fall back to a single-block buffer */
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		/* point at the last block of the range */
		offset += ((num_blks - 1) << BBSHIFT);
	}

	/* walk backwards looking for a log record header */
	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head.  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log beginning (maybe the log is zeroed)? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/*
			 * Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there.
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the first and last cycles are the same, the log either never
	 * wrapped during this cycle (head is anywhere, possibly at the end)
	 * or wrapped completely; otherwise the head lies somewhere in the
	 * first half, where the cycle changes from first_half_cycle to
	 * last_half_cycle.  Binary search for that transition point.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * Assume the head is just past the end of the physical log;
		 * the block scan below (for cycle - 1 stragglers) will pull
		 * it back if an earlier partial write is found.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * The cycle changed within the log: find the first block
		 * carrying last_half_cycle; that is the candidate head.
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back num_scan_bblks (the maximum
	 * amount of outstanding log I/O) from the candidate head looking
	 * for blocks still carrying the "stop" cycle - evidence of a
	 * partial write that must move the head back.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * Whole scan window lies before head_blk - no wrap needed.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {
		/*
		 * Scan window wraps around the start of the physical log:
		 * first check the tail end of the log (blocks there still
		 * belong to the previous cycle, hence stop_on_cycle - 1),
		 * then the beginning of the log up to head_blk.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't
		 * find an old log record (stop_on_cycle).
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks;

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* record may span the physical end of the log */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892STATIC int
893xlog_find_tail(
894 struct xlog *log,
895 xfs_daddr_t *head_blk,
896 xfs_daddr_t *tail_blk)
897{
898 xlog_rec_header_t *rhead;
899 xlog_op_header_t *op_head;
900 xfs_caddr_t offset = NULL;
901 xfs_buf_t *bp;
902 int error, i, found;
903 xfs_daddr_t umount_data_blk;
904 xfs_daddr_t after_umount_blk;
905 xfs_lsn_t tail_lsn;
906 int hblks;
907
908 found = 0;
909
910
911
912
913 if ((error = xlog_find_head(log, head_blk)))
914 return error;
915
916 bp = xlog_get_bp(log, 1);
917 if (!bp)
918 return ENOMEM;
919 if (*head_blk == 0) {
920 error = xlog_bread(log, 0, 1, bp, &offset);
921 if (error)
922 goto done;
923
924 if (xlog_get_cycle(offset) == 0) {
925 *tail_blk = 0;
926
927 goto done;
928 }
929 }
930
931
932
933
934 ASSERT(*head_blk < INT_MAX);
935 for (i = (int)(*head_blk) - 1; i >= 0; i--) {
936 error = xlog_bread(log, i, 1, bp, &offset);
937 if (error)
938 goto done;
939
940 if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
941 found = 1;
942 break;
943 }
944 }
945
946
947
948
949
950
951 if (!found) {
952 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
953 error = xlog_bread(log, i, 1, bp, &offset);
954 if (error)
955 goto done;
956
957 if (*(__be32 *)offset ==
958 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
959 found = 2;
960 break;
961 }
962 }
963 }
964 if (!found) {
965 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
966 ASSERT(0);
967 return XFS_ERROR(EIO);
968 }
969
970
971 rhead = (xlog_rec_header_t *)offset;
972 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
973
974
975
976
977
978
979
980
981
982
983
984 log->l_prev_block = i;
985 log->l_curr_block = (int)*head_blk;
986 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
987 if (found == 2)
988 log->l_curr_cycle++;
989 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
990 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
991 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
992 BBTOB(log->l_curr_block));
993 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
994 BBTOB(log->l_curr_block));
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1008 int h_size = be32_to_cpu(rhead->h_size);
1009 int h_version = be32_to_cpu(rhead->h_version);
1010
1011 if ((h_version & XLOG_VERSION_2) &&
1012 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1013 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1014 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1015 hblks++;
1016 } else {
1017 hblks = 1;
1018 }
1019 } else {
1020 hblks = 1;
1021 }
1022 after_umount_blk = (i + hblks + (int)
1023 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1024 tail_lsn = atomic64_read(&log->l_tail_lsn);
1025 if (*head_blk == after_umount_blk &&
1026 be32_to_cpu(rhead->h_num_logops) == 1) {
1027 umount_data_blk = (i + hblks) % log->l_logBBsize;
1028 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1029 if (error)
1030 goto done;
1031
1032 op_head = (xlog_op_header_t *)offset;
1033 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1034
1035
1036
1037
1038
1039 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1040 log->l_curr_cycle, after_umount_blk);
1041 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1042 log->l_curr_cycle, after_umount_blk);
1043 *tail_blk = after_umount_blk;
1044
1045
1046
1047
1048
1049
1050
1051 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1052 }
1053 }
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1075 error = xlog_clear_stale_blocks(log, tail_lsn);
1076
1077done:
1078 xlog_put_bp(bp);
1079
1080 if (error)
1081 xfs_warn(log->l_mp, "failed to locate log tail");
1082 return error;
1083}
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101STATIC int
1102xlog_find_zeroed(
1103 struct xlog *log,
1104 xfs_daddr_t *blk_no)
1105{
1106 xfs_buf_t *bp;
1107 xfs_caddr_t offset;
1108 uint first_cycle, last_cycle;
1109 xfs_daddr_t new_blk, last_blk, start_blk;
1110 xfs_daddr_t num_scan_bblks;
1111 int error, log_bbnum = log->l_logBBsize;
1112
1113 *blk_no = 0;
1114
1115
1116 bp = xlog_get_bp(log, 1);
1117 if (!bp)
1118 return ENOMEM;
1119 error = xlog_bread(log, 0, 1, bp, &offset);
1120 if (error)
1121 goto bp_err;
1122
1123 first_cycle = xlog_get_cycle(offset);
1124 if (first_cycle == 0) {
1125 *blk_no = 0;
1126 xlog_put_bp(bp);
1127 return -1;
1128 }
1129
1130
1131 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1132 if (error)
1133 goto bp_err;
1134
1135 last_cycle = xlog_get_cycle(offset);
1136 if (last_cycle != 0) {
1137 xlog_put_bp(bp);
1138 return 0;
1139 } else if (first_cycle != 1) {
1140
1141
1142
1143
1144
1145 xfs_warn(log->l_mp,
1146 "Log inconsistent or not a log (last==0, first!=1)");
1147 return XFS_ERROR(EINVAL);
1148 }
1149
1150
1151 last_blk = log_bbnum-1;
1152 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1153 goto bp_err;
1154
1155
1156
1157
1158
1159
1160
1161 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1162 ASSERT(num_scan_bblks <= INT_MAX);
1163
1164 if (last_blk < num_scan_bblks)
1165 num_scan_bblks = last_blk;
1166 start_blk = last_blk - num_scan_bblks;
1167
1168
1169
1170
1171
1172
1173
1174 if ((error = xlog_find_verify_cycle(log, start_blk,
1175 (int)num_scan_bblks, 0, &new_blk)))
1176 goto bp_err;
1177 if (new_blk != -1)
1178 last_blk = new_blk;
1179
1180
1181
1182
1183
1184 if ((error = xlog_find_verify_log_record(log, start_blk,
1185 &last_blk, 0)) == -1) {
1186 error = XFS_ERROR(EIO);
1187 goto bp_err;
1188 } else if (error)
1189 goto bp_err;
1190
1191 *blk_no = last_blk;
1192bp_err:
1193 xlog_put_bp(bp);
1194 if (error)
1195 return error;
1196 return -1;
1197}
1198
1199
1200
1201
1202
1203
1204STATIC void
1205xlog_add_record(
1206 struct xlog *log,
1207 xfs_caddr_t buf,
1208 int cycle,
1209 int block,
1210 int tail_cycle,
1211 int tail_block)
1212{
1213 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1214
1215 memset(buf, 0, BBSIZE);
1216 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1217 recp->h_cycle = cpu_to_be32(cycle);
1218 recp->h_version = cpu_to_be32(
1219 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1220 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1221 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1222 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1223 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1224}
1225
/*
 * Write a sequence of empty log record headers covering @blocks basic
 * blocks starting at @start_block, all stamped with @cycle and the
 * given tail cycle/block.  Used by xlog_clear_stale_blocks() to stomp
 * stale partial writes in front of the head.
 */
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/*
	 * We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		/* j = blocks of preserved pre-existing data at buffer head */
		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/*
		 * We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from the log so that we
 * will not write over the unmount record after a clean unmount in a 512
 * block log.  Doing so would leave the log without any valid log records
 * in it until a new one was written.  If we crashed during that time we
 * would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
1441
1442
1443
1444
1445
1446
1447
1448
1449STATIC xlog_recover_t *
1450xlog_recover_find_tid(
1451 struct hlist_head *head,
1452 xlog_tid_t tid)
1453{
1454 xlog_recover_t *trans;
1455
1456 hlist_for_each_entry(trans, head, r_list) {
1457 if (trans->r_log_tid == tid)
1458 return trans;
1459 }
1460 return NULL;
1461}
1462
1463STATIC void
1464xlog_recover_new_tid(
1465 struct hlist_head *head,
1466 xlog_tid_t tid,
1467 xfs_lsn_t lsn)
1468{
1469 xlog_recover_t *trans;
1470
1471 trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1472 trans->r_log_tid = tid;
1473 trans->r_lsn = lsn;
1474 INIT_LIST_HEAD(&trans->r_itemq);
1475
1476 INIT_HLIST_NODE(&trans->r_list);
1477 hlist_add_head(&trans->r_list, head);
1478}
1479
1480STATIC void
1481xlog_recover_add_item(
1482 struct list_head *head)
1483{
1484 xlog_recover_item_t *item;
1485
1486 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1487 INIT_LIST_HEAD(&item->ri_list);
1488 list_add_tail(&item->ri_list, head);
1489}
1490
/*
 * Append a continuation region (a log operation split across records)
 * to the last region of the last item in the transaction, growing that
 * region's buffer to hold the combined data.
 */
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len);
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	/* grow the last region and append the continuation data */
	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537STATIC int
1538xlog_recover_add_to_trans(
1539 struct xlog *log,
1540 struct xlog_recover *trans,
1541 xfs_caddr_t dp,
1542 int len)
1543{
1544 xfs_inode_log_format_t *in_f;
1545 xlog_recover_item_t *item;
1546 xfs_caddr_t ptr;
1547
1548 if (!len)
1549 return 0;
1550 if (list_empty(&trans->r_itemq)) {
1551
1552 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1553 xfs_warn(log->l_mp, "%s: bad header magic number",
1554 __func__);
1555 ASSERT(0);
1556 return XFS_ERROR(EIO);
1557 }
1558 if (len == sizeof(xfs_trans_header_t))
1559 xlog_recover_add_item(&trans->r_itemq);
1560 memcpy(&trans->r_theader, dp, len);
1561 return 0;
1562 }
1563
1564 ptr = kmem_alloc(len, KM_SLEEP);
1565 memcpy(ptr, dp, len);
1566 in_f = (xfs_inode_log_format_t *)ptr;
1567
1568
1569 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1570 if (item->ri_total != 0 &&
1571 item->ri_total == item->ri_cnt) {
1572
1573 xlog_recover_add_item(&trans->r_itemq);
1574 item = list_entry(trans->r_itemq.prev,
1575 xlog_recover_item_t, ri_list);
1576 }
1577
1578 if (item->ri_total == 0) {
1579 if (in_f->ilf_size == 0 ||
1580 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1581 xfs_warn(log->l_mp,
1582 "bad number of regions (%d) in inode log format",
1583 in_f->ilf_size);
1584 ASSERT(0);
1585 return XFS_ERROR(EIO);
1586 }
1587
1588 item->ri_total = in_f->ilf_size;
1589 item->ri_buf =
1590 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1591 KM_SLEEP);
1592 }
1593 ASSERT(item->ri_total > item->ri_cnt);
1594
1595 item->ri_buf[item->ri_cnt].i_addr = ptr;
1596 item->ri_buf[item->ri_cnt].i_len = len;
1597 item->ri_cnt++;
1598 trace_xfs_log_recover_item_add(log, trans, item, 0);
1599 return 0;
1600}
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651STATIC int
1652xlog_recover_reorder_trans(
1653 struct xlog *log,
1654 struct xlog_recover *trans,
1655 int pass)
1656{
1657 xlog_recover_item_t *item, *n;
1658 LIST_HEAD(sort_list);
1659 LIST_HEAD(cancel_list);
1660 LIST_HEAD(buffer_list);
1661 LIST_HEAD(inode_buffer_list);
1662 LIST_HEAD(inode_list);
1663
1664 list_splice_init(&trans->r_itemq, &sort_list);
1665 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1666 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1667
1668 switch (ITEM_TYPE(item)) {
1669 case XFS_LI_ICREATE:
1670 list_move_tail(&item->ri_list, &buffer_list);
1671 break;
1672 case XFS_LI_BUF:
1673 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1674 trace_xfs_log_recover_item_reorder_head(log,
1675 trans, item, pass);
1676 list_move(&item->ri_list, &cancel_list);
1677 break;
1678 }
1679 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1680 list_move(&item->ri_list, &inode_buffer_list);
1681 break;
1682 }
1683 list_move_tail(&item->ri_list, &buffer_list);
1684 break;
1685 case XFS_LI_INODE:
1686 case XFS_LI_DQUOT:
1687 case XFS_LI_QUOTAOFF:
1688 case XFS_LI_EFD:
1689 case XFS_LI_EFI:
1690 trace_xfs_log_recover_item_reorder_tail(log,
1691 trans, item, pass);
1692 list_move_tail(&item->ri_list, &inode_list);
1693 break;
1694 default:
1695 xfs_warn(log->l_mp,
1696 "%s: unrecognized type of log operation",
1697 __func__);
1698 ASSERT(0);
1699 return XFS_ERROR(EIO);
1700 }
1701 }
1702 ASSERT(list_empty(&sort_list));
1703 if (!list_empty(&buffer_list))
1704 list_splice(&buffer_list, &trans->r_itemq);
1705 if (!list_empty(&inode_list))
1706 list_splice_tail(&inode_list, &trans->r_itemq);
1707 if (!list_empty(&inode_buffer_list))
1708 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1709 if (!list_empty(&cancel_list))
1710 list_splice_tail(&cancel_list, &trans->r_itemq);
1711 return 0;
1712}
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726STATIC int
1727xlog_recover_buffer_pass1(
1728 struct xlog *log,
1729 struct xlog_recover_item *item)
1730{
1731 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1732 struct list_head *bucket;
1733 struct xfs_buf_cancel *bcp;
1734
1735
1736
1737
1738 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1739 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1740 return 0;
1741 }
1742
1743
1744
1745
1746
1747 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1748 list_for_each_entry(bcp, bucket, bc_list) {
1749 if (bcp->bc_blkno == buf_f->blf_blkno &&
1750 bcp->bc_len == buf_f->blf_len) {
1751 bcp->bc_refcount++;
1752 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1753 return 0;
1754 }
1755 }
1756
1757 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1758 bcp->bc_blkno = buf_f->blf_blkno;
1759 bcp->bc_len = buf_f->blf_len;
1760 bcp->bc_refcount = 1;
1761 list_add_tail(&bcp->bc_list, bucket);
1762
1763 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1764 return 0;
1765}
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780STATIC int
1781xlog_check_buffer_cancelled(
1782 struct xlog *log,
1783 xfs_daddr_t blkno,
1784 uint len,
1785 ushort flags)
1786{
1787 struct list_head *bucket;
1788 struct xfs_buf_cancel *bcp;
1789
1790 if (log->l_buf_cancel_table == NULL) {
1791
1792
1793
1794
1795 ASSERT(!(flags & XFS_BLF_CANCEL));
1796 return 0;
1797 }
1798
1799
1800
1801
1802 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1803 list_for_each_entry(bcp, bucket, bc_list) {
1804 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1805 goto found;
1806 }
1807
1808
1809
1810
1811
1812 ASSERT(!(flags & XFS_BLF_CANCEL));
1813 return 0;
1814
1815found:
1816
1817
1818
1819
1820
1821
1822 if (flags & XFS_BLF_CANCEL) {
1823 if (--bcp->bc_refcount == 0) {
1824 list_del(&bcp->bc_list);
1825 kmem_free(bcp);
1826 }
1827 }
1828 return 1;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
/*
 * Replay an inode buffer.  Inode buffers are only partially replayed:
 * of the logged regions, only the on-disk di_next_unlinked fields (the
 * unlinked-inode list links) are copied into the buffer.  The inode
 * cores themselves are recovered from separate inode log items, so
 * everything else in the buffer is left as it is on disk.
 *
 * Returns 0 on success, or EFSCORRUPTED if a logged di_next_unlinked
 * value of zero is found (zero is never a valid replay value here).
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;	/* current region in item->ri_buf */
	int			bit = 0;	/* current bit in the dirty bitmap */
	int			nbits = 0;	/* length of current bitmap run */
	int			reg_buf_offset = 0;	/* byte offset of current region */
	int			reg_buf_bytes = 0;	/* byte length of current region */
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Attach the inode buffer verifier so subsequent writeback is
	 * validated; only done when the filesystem has CRCs enabled.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		/* byte offset of this inode's di_next_unlinked field */
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field lies beyond
			 * the current logged region; advance to the
			 * next run of set bits in the dirty bitmap.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * No more logged regions: nothing further in
			 * this buffer needs replaying.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			/* bitmap bits are in XFS_BLF_CHUNK-sized units */
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * The current logged region starts after this inode's
		 * di_next_unlinked field, so that field was not logged;
		 * move on to the next inode.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of this
		 * inode's di_next_unlinked field.  Extract the logged
		 * value and copy it into the buffer.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}

		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * Recalculate the on-disk inode's CRC now that its
		 * di_next_unlinked field has changed, leaving the inode
		 * in a consistent state for the next reader.
		 */
		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));

	}

	return 0;
}
1948
1949
1950
1951
1952
1953
1954
1955
1956
/*
 * Validate the recovered buffer's type and attach the matching buffer
 * verifier ops for writeback.  The magic number is found in one of a
 * few places depending on the block type:
 *	- the first 16 bits of the buffer (inode, dquot buffers),
 *	- the first 32 bits of the buffer (most block types),
 *	- inside a struct xfs_da_blkinfo at the start of the buffer
 *	  (directory leaf / da-node / attr leaf blocks).
 * A mismatched magic is reported (and asserted in debug builds) but
 * the function never fails - the buffer is simply left without ops.
 */
static void
xlog_recovery_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;

	/* decode all three candidate magic locations up front */
	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		/* btree buffers share one blft type; magic picks the ops */
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		default:
			xfs_warn(mp, "Bad btree block magic!");
			ASSERT(0);
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			xfs_warn(mp, "Bad AGF block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		/* AGFL blocks carry a magic only on CRC filesystems */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			break;
		if (magic32 != XFS_AGFL_MAGIC) {
			xfs_warn(mp, "Bad AGFL block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			xfs_warn(mp, "Bad AGI block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			xfs_warn(mp, "Bad DQUOT block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		/*
		 * NOTE(review): presumably only inode allocation buffers
		 * are typed this way (unlinked-list updates go through
		 * xlog_recover_do_inode_buffer) - confirm against the
		 * logging side.
		 */
		if (magic16 != XFS_DINODE_MAGIC) {
			xfs_warn(mp, "Bad INODE block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			xfs_warn(mp, "Bad symlink block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			xfs_warn(mp, "Bad dir block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			xfs_warn(mp, "Bad dir data magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			xfs_warn(mp, "Bad dir3 free magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			xfs_warn(mp, "Bad dir leaf1 magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			xfs_warn(mp, "Bad dir leafn magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			xfs_warn(mp, "Bad da node magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			xfs_warn(mp, "Bad attr leaf magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		/* remote attr blocks have a magic only on CRC filesystems */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			break;
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			xfs_warn(mp, "Bad attr remote magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			xfs_warn(mp, "Bad SB block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}
}
2143
2144
2145
2146
2147
2148
2149
/*
 * Replay a regular (non-inode) buffer: walk the dirty bitmap in the
 * buffer log format and copy each logged region out of the item's
 * region array into the corresponding offset of the buffer.  Dquot
 * regions are additionally sanity-checked before being copied, and
 * bad dquots are skipped rather than treated as fatal.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;	/* region index (region 0 is the format) */
	int			bit;	/* current bit in the dirty bitmap */
	int			nbits;	/* length of current bitmap run */
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The logged region copy may be shorter than the bitmap
		 * run suggests; clamp the copy length to what was
		 * actually logged so we never read past the region.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Dquot regions get extra validation: skip (don't copy)
		 * a NULL, undersized, or corrupt logged dquot.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	/*
	 * Attach verifier ops for writeback; only possible on CRC
	 * filesystems where the block type can be validated against
	 * the magic numbers (see xlog_recovery_validate_buf_type).
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xlog_recovery_validate_buf_type(mp, bp, buf_f);
}
2239
2240
2241
2242
/*
 * Sanity-check an on-disk dquot structure.
 *
 * Validates the magic, version, type flags and (when @id != -1) the
 * dquot ID, then checks that any exceeded soft limit has its timer
 * started.  Returns the number of errors found (0 means clean).
 *
 * If XFS_QMOPT_DOWARN is set in @flags each failure is logged; if
 * XFS_QMOPT_DQREPAIR is set and errors were found, the dquot block is
 * reinitialised in place to an empty dquot of @type/@id (and its CRC
 * recomputed on CRC-enabled filesystems) before the error count is
 * returned.
 */
int
xfs_qm_dqcheck(
	struct xfs_mount *mp,
	xfs_disk_dquot_t *ddq,
	xfs_dqid_t	 id,
	uint		 type,	  /* used only by the repair path */
	uint		 flags,	  /* XFS_QMOPT_DOWARN / XFS_QMOPT_DQREPAIR */
	char		 *str)	  /* caller tag for log messages */
{
	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
	int		errs = 0;

	/*
	 * Basic structural checks: the magic, version and type flags
	 * must all match before anything else in the dquot can be
	 * trusted.  Every failed check bumps errs rather than
	 * returning immediately, so the caller learns how bad the
	 * dquot is and DOWARN reports each problem individually.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
		"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
		str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
		errs++;
	}
	if (ddq->d_version != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
		"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
		str, id, ddq->d_version, XFS_DQUOT_VERSION);
		errs++;
	}

	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
		"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
		str, id, ddq->d_flags);
		errs++;
	}

	/* id == -1 means the caller doesn't know the expected ID */
	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
		"%s : ondisk-dquot 0x%p, ID mismatch: "
		"0x%x expected, found id 0x%x",
		str, ddq, id, be32_to_cpu(ddq->d_id));
		errs++;
	}

	/*
	 * Timer checks are only meaningful on a structurally sound,
	 * non-root (d_id != 0) dquot: any usage over a soft limit
	 * must have its grace timer running.
	 */
	if (!errs && ddq->d_id) {
		if (ddq->d_blk_softlimit &&
		    be64_to_cpu(ddq->d_bcount) >
				be64_to_cpu(ddq->d_blk_softlimit)) {
			if (!ddq->d_btimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_ino_softlimit &&
		    be64_to_cpu(ddq->d_icount) >
				be64_to_cpu(ddq->d_ino_softlimit)) {
			if (!ddq->d_itimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_rtb_softlimit &&
		    be64_to_cpu(ddq->d_rtbcount) >
				be64_to_cpu(ddq->d_rtb_softlimit)) {
			if (!ddq->d_rtbtimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
	}

	/* done unless the caller asked for repair and we found errors */
	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return errs;

	if (flags & XFS_QMOPT_DOWARN)
		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Repair: wipe the whole dqblk and rebuild a minimal valid
	 * dquot of the requested type and ID.  Repair requires a known
	 * ID, hence the assertion.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(d, 0, sizeof(xfs_dqblk_t));

	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_flags = type;
	d->dd_diskdq.d_id = cpu_to_be32(id);

	/* CRC filesystems also need the UUID and checksum refreshed */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	return errs;
}
2366
2367
2368
2369
2370
2371
2372
2373STATIC void
2374xlog_recover_do_dquot_buffer(
2375 struct xfs_mount *mp,
2376 struct xlog *log,
2377 struct xlog_recover_item *item,
2378 struct xfs_buf *bp,
2379 struct xfs_buf_log_format *buf_f)
2380{
2381 uint type;
2382
2383 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2384
2385
2386
2387
2388 if (mp->m_qflags == 0) {
2389 return;
2390 }
2391
2392 type = 0;
2393 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2394 type |= XFS_DQ_USER;
2395 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2396 type |= XFS_DQ_PROJ;
2397 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2398 type |= XFS_DQ_GROUP;
2399
2400
2401
2402 if (log->l_quotaoffs_flag & type)
2403 return;
2404
2405 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2406}
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431STATIC int
2432xlog_recover_buffer_pass2(
2433 struct xlog *log,
2434 struct list_head *buffer_list,
2435 struct xlog_recover_item *item)
2436{
2437 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2438 xfs_mount_t *mp = log->l_mp;
2439 xfs_buf_t *bp;
2440 int error;
2441 uint buf_flags;
2442
2443
2444
2445
2446
2447 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2448 buf_f->blf_len, buf_f->blf_flags)) {
2449 trace_xfs_log_recover_buf_cancel(log, buf_f);
2450 return 0;
2451 }
2452
2453 trace_xfs_log_recover_buf_recover(log, buf_f);
2454
2455 buf_flags = 0;
2456 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2457 buf_flags |= XBF_UNMAPPED;
2458
2459 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2460 buf_flags, NULL);
2461 if (!bp)
2462 return XFS_ERROR(ENOMEM);
2463 error = bp->b_error;
2464 if (error) {
2465 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2466 xfs_buf_relse(bp);
2467 return error;
2468 }
2469
2470 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2471 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2472 } else if (buf_f->blf_flags &
2473 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2474 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2475 } else {
2476 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2477 }
2478 if (error)
2479 return XFS_ERROR(error);
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496 if (XFS_DINODE_MAGIC ==
2497 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2498 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2499 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2500 xfs_buf_stale(bp);
2501 error = xfs_bwrite(bp);
2502 } else {
2503 ASSERT(bp->b_target->bt_mount == mp);
2504 bp->b_iodone = xlog_recover_iodone;
2505 xfs_buf_delwri_queue(bp, buffer_list);
2506 }
2507
2508 xfs_buf_relse(bp);
2509 return error;
2510}
2511
/*
 * Pass-2 handler for inode log items: read the inode's cluster buffer,
 * sanity-check both the on-disk inode and the logged copy, then replay
 * the logged inode core and any logged data/attr fork regions into the
 * buffer, finally queueing the buffer for delayed write.
 *
 * Returns 0 on success (including the skip cases: cancelled buffer or
 * an on-disk inode newer than the log copy), ENOMEM on buffer
 * allocation failure, EFSCORRUPTED for any consistency-check failure,
 * or EIO for an invalid attr fork flag.
 */
STATIC int
xlog_recover_inode_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	xfs_dinode_t		*dip;
	int			len;
	xfs_caddr_t		src;
	xfs_caddr_t		dest;
	int			error;
	int			attr_index;
	uint			fields;
	xfs_icdinode_t		*dicp;
	uint			isize;
	int			need_free = 0;	/* free in_f before returning? */

	/* convert older/foreign log format records to the native layout */
	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
		in_f = item->ri_buf[0].i_addr;
	} else {
		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}

	/*
	 * Inode buffers can be freed and cancelled later in the log;
	 * do not replay the inode in that case.
	 */
	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
					in_f->ilf_len, 0)) {
		error = 0;
		trace_xfs_log_recover_inode_cancel(log, in_f);
		goto error;
	}
	trace_xfs_log_recover_inode_recover(log, in_f);

	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
			  &xfs_inode_buf_ops);
	if (!bp) {
		error = ENOMEM;
		goto error;
	}
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
		xfs_buf_relse(bp);
		goto error;
	}
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
		xfs_buf_relse(bp);
		xfs_alert(mp,
	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
			__func__, dip, bp, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
				 XFS_ERRLEVEL_LOW, mp);
		error = EFSCORRUPTED;
		goto error;
	}
	/* ...and the logged in-core inode copy must look valid too */
	dicp = item->ri_buf[1].i_addr;
	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
		xfs_buf_relse(bp);
		xfs_alert(mp,
			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
			__func__, item, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
				 XFS_ERRLEVEL_LOW, mp);
		error = EFSCORRUPTED;
		goto error;
	}

	/*
	 * di_flushiter is only valid for v1/2 inodes (non-CRC
	 * filesystems): skip replay if the on-disk inode has seen a
	 * more recent flush than the log copy.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
			dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* counter wrapped: the log copy is newer, replay */
		} else {
			xfs_buf_relse(bp);
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto error;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	dicp->di_flushiter = 0;

	/* fork format sanity checks before replaying the core */
	if (unlikely(S_ISREG(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					 XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_buf_relse(bp);
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = EFSCORRUPTED;
			goto error;
		}
	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_buf_relse(bp);
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = EFSCORRUPTED;
			goto error;
		}
	}
	/* extent counts can never exceed the block count */
	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_buf_relse(bp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			dicp->di_nextents + dicp->di_anextents,
			dicp->di_nblocks);
		error = EFSCORRUPTED;
		goto error;
	}
	/* the attr fork offset must fit inside the inode */
	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_buf_relse(bp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
		error = EFSCORRUPTED;
		goto error;
	}
	/* logged core must not exceed the icdinode size for its version */
	isize = xfs_icdinode_size(dicp->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_buf_relse(bp);
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr 0x%p",
			__func__, item->ri_buf[1].i_len, item);
		error = EFSCORRUPTED;
		goto error;
	}

	/* The core is in in-core format; convert it to on-disk format */
	xfs_dinode_to_disk(dip, dicp);

	/* copy any logged bytes past the in-core inode core verbatim */
	if (item->ri_buf[1].i_len > isize) {
		memcpy((char *)dip + isize,
			item->ri_buf[1].i_addr + isize,
			item->ri_buf[1].i_len - isize);
	}

	/* replay the device number / uuid union if it was logged */
	fields = in_f->ilf_fields;
	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
	case XFS_ILOG_DEV:
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
		break;
	case XFS_ILOG_UUID:
		memcpy(XFS_DFORK_DPTR(dip),
		       &in_f->ilf_u.ilfu_uuid,
		       sizeof(uuid_t));
		break;
	}

	/* only the format and core regions were logged: we're done */
	if (in_f->ilf_size == 2)
		goto write_inode_buffer;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	/* replay the data fork, if it was logged */
	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		/* btree roots need converting to the on-disk bmdr format */
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it.  There may or
	 * may not have been any other non-core data logged in this
	 * transaction, so the attr fork region index depends on
	 * whether the data fork was logged too.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t*)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			xfs_buf_relse(bp);
			error = EIO;
			goto error;
		}
	}

write_inode_buffer:
	/* re-generate the checksum before the buffer goes out */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return XFS_ERROR(error);
}
2790
2791
2792
2793
2794
2795
2796STATIC int
2797xlog_recover_quotaoff_pass1(
2798 struct xlog *log,
2799 struct xlog_recover_item *item)
2800{
2801 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2802 ASSERT(qoff_f);
2803
2804
2805
2806
2807
2808 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2809 log->l_quotaoffs_flag |= XFS_DQ_USER;
2810 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2811 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2812 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2813 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2814
2815 return (0);
2816}
2817
2818
2819
2820
/*
 * Pass-2 handler for dquot log items: verify both the logged dquot
 * copy and the on-disk dquot, then overwrite the on-disk dquot with
 * the logged copy and queue the buffer for delayed write.  Replay is
 * skipped when quotas are disabled or a later quotaoff covers this
 * dquot's type.  Returns 0 on success/skip, EIO on a bad record, or
 * the error from reading the dquot buffer.
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;


	/*
	 * Filesystems are required to send in quota flags at mount time;
	 * no flags means quotas are off, so there is nothing to replay.
	 */
	if (mp->m_qflags == 0)
		return (0);

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return XFS_ERROR(EIO);
	}
	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return XFS_ERROR(EIO);
	}

	/*
	 * This type of quotas was turned off, so ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return (0);

	/*
	 * At this point we know quota was not turned off for this type.
	 * Sanity-check the logged dquot copy before touching disk; a
	 * corrupt log copy means the record cannot be replayed at all.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return XFS_ERROR(EIO);
	ASSERT(dq_f->qlf_len == 1);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   NULL);
	if (error)
		return error;

	ASSERT(bp);
	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * Also sanity-check the on-disk dquot we are about to
	 * overwrite - if the buffer does not contain a valid dquot at
	 * that offset, overwriting it would corrupt the filesystem.
	 */
	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2");
	if (error) {
		xfs_buf_relse(bp);
		return XFS_ERROR(EIO);
	}

	/* replay the logged dquot, refreshing the CRC where applicable */
	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);

	return (0);
}
2913
2914
2915
2916
2917
2918
2919
2920
/*
 * Pass-2 handler for extent-free intent (EFI) log items: reconstruct
 * the in-core EFI item from the log record and insert it into the AIL
 * at the record's lsn.  If a matching EFD is found later in the log
 * the EFI is removed again; otherwise the extent free is redone after
 * recovery completes.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int			error;
	xfs_mount_t		*mp = log->l_mp;
	xfs_efi_log_item_t	*efip;
	xfs_efi_log_format_t	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
					 &(efip->efi_format)))) {
		xfs_efi_item_free(efip);
		return error;
	}
	/* mark all extents as committed so the EFI is complete */
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * xfs_trans_ail_update() drops the AIL lock, which is why there
	 * is no matching spin_unlock() here.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	return 0;
}
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
/*
 * Pass-2 handler for extent-free done (EFD) log items: an EFD proves
 * the extent free described by the matching EFI completed, so find
 * that EFI in the AIL (it was inserted by xlog_recover_efi_pass2) and
 * remove and free it.  A missing EFI is not an error - the EFI may
 * have been in an earlier, already-overwritten part of the log.
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	__uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
	/* the record must be exactly a 32-bit or 64-bit EFD format */
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Search for the EFI with the id in the EFD format structure
	 * in the AIL.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * xfs_trans_ail_delete() drops the AIL
				 * lock, so retake it before breaking
				 * out to the cursor teardown below.
				 */
				xfs_trans_ail_delete(ailp, lip,
						     SHUTDOWN_CORRUPT_INCORE);
				xfs_efi_item_free(efip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015STATIC int
3016xlog_recover_do_icreate_pass2(
3017 struct xlog *log,
3018 struct list_head *buffer_list,
3019 xlog_recover_item_t *item)
3020{
3021 struct xfs_mount *mp = log->l_mp;
3022 struct xfs_icreate_log *icl;
3023 xfs_agnumber_t agno;
3024 xfs_agblock_t agbno;
3025 unsigned int count;
3026 unsigned int isize;
3027 xfs_agblock_t length;
3028
3029 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3030 if (icl->icl_type != XFS_LI_ICREATE) {
3031 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3032 return EINVAL;
3033 }
3034
3035 if (icl->icl_size != 1) {
3036 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3037 return EINVAL;
3038 }
3039
3040 agno = be32_to_cpu(icl->icl_ag);
3041 if (agno >= mp->m_sb.sb_agcount) {
3042 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3043 return EINVAL;
3044 }
3045 agbno = be32_to_cpu(icl->icl_agbno);
3046 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3047 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3048 return EINVAL;
3049 }
3050 isize = be32_to_cpu(icl->icl_isize);
3051 if (isize != mp->m_sb.sb_inodesize) {
3052 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3053 return EINVAL;
3054 }
3055 count = be32_to_cpu(icl->icl_count);
3056 if (!count) {
3057 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3058 return EINVAL;
3059 }
3060 length = be32_to_cpu(icl->icl_length);
3061 if (!length || length >= mp->m_sb.sb_agblocks) {
3062 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3063 return EINVAL;
3064 }
3065
3066
3067 ASSERT(count == XFS_IALLOC_INODES(mp));
3068 ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3069 if (count != XFS_IALLOC_INODES(mp) ||
3070 length != XFS_IALLOC_BLOCKS(mp)) {
3071 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3072 return EINVAL;
3073 }
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085 if (xlog_check_buffer_cancelled(log,
3086 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3087 return 0;
3088
3089 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3090 be32_to_cpu(icl->icl_gen));
3091 return 0;
3092}
3093
3094
3095
3096
3097
3098
3099STATIC void
3100xlog_recover_free_trans(
3101 struct xlog_recover *trans)
3102{
3103 xlog_recover_item_t *item, *n;
3104 int i;
3105
3106 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3107
3108 list_del(&item->ri_list);
3109 for (i = 0; i < item->ri_cnt; i++)
3110 kmem_free(item->ri_buf[i].i_addr);
3111
3112 kmem_free(item->ri_buf);
3113 kmem_free(item);
3114 }
3115
3116 kmem_free(trans);
3117}
3118
3119STATIC int
3120xlog_recover_commit_pass1(
3121 struct xlog *log,
3122 struct xlog_recover *trans,
3123 struct xlog_recover_item *item)
3124{
3125 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3126
3127 switch (ITEM_TYPE(item)) {
3128 case XFS_LI_BUF:
3129 return xlog_recover_buffer_pass1(log, item);
3130 case XFS_LI_QUOTAOFF:
3131 return xlog_recover_quotaoff_pass1(log, item);
3132 case XFS_LI_INODE:
3133 case XFS_LI_EFI:
3134 case XFS_LI_EFD:
3135 case XFS_LI_DQUOT:
3136 case XFS_LI_ICREATE:
3137
3138 return 0;
3139 default:
3140 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3141 __func__, ITEM_TYPE(item));
3142 ASSERT(0);
3143 return XFS_ERROR(EIO);
3144 }
3145}
3146
3147STATIC int
3148xlog_recover_commit_pass2(
3149 struct xlog *log,
3150 struct xlog_recover *trans,
3151 struct list_head *buffer_list,
3152 struct xlog_recover_item *item)
3153{
3154 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3155
3156 switch (ITEM_TYPE(item)) {
3157 case XFS_LI_BUF:
3158 return xlog_recover_buffer_pass2(log, buffer_list, item);
3159 case XFS_LI_INODE:
3160 return xlog_recover_inode_pass2(log, buffer_list, item);
3161 case XFS_LI_EFI:
3162 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3163 case XFS_LI_EFD:
3164 return xlog_recover_efd_pass2(log, item);
3165 case XFS_LI_DQUOT:
3166 return xlog_recover_dquot_pass2(log, buffer_list, item);
3167 case XFS_LI_ICREATE:
3168 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3169 case XFS_LI_QUOTAOFF:
3170
3171 return 0;
3172 default:
3173 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3174 __func__, ITEM_TYPE(item));
3175 ASSERT(0);
3176 return XFS_ERROR(EIO);
3177 }
3178}
3179
3180
3181
3182
3183
3184
3185
/*
 * Commit a transaction that has been fully read from the log: unhash
 * it, reorder its items into replay order, replay each item for the
 * given pass, then free the in-memory transaction on success.  Pass2
 * buffer replay queues writes on a local delwri list which is always
 * submitted before returning, success or not.
 *
 * NOTE(review): on item replay failure the transaction is unhashed but
 * NOT freed here — the trans memory appears to be abandoned on the
 * error path; confirm callers fail the mount so this is a one-shot
 * leak rather than a recurring one.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	int			error = 0, error2;
	xlog_recover_item_t	*item;
	LIST_HEAD		(buffer_list);

	/* take the transaction off the tid hash; it is complete now */
	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry(item, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			error = xlog_recover_commit_pass2(log, trans,
							  &buffer_list, item);
			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

	xlog_recover_free_trans(trans);

out:
	/* flush any buffer writes queued by pass2 item replay */
	error2 = xfs_buf_delwri_submit(&buffer_list);
	return error ? error : error2;
}
3225
/*
 * An unmount record was found while walking the log.  Just note it;
 * no replay work is required.
 *
 * NOTE(review): @trans is neither unhashed nor freed here — verify the
 * callers' expectations for the transaction's lifetime on this path.
 */
STATIC int
xlog_recover_unmount_trans(
	struct xlog		*log,
	struct xlog_recover	*trans)
{
	/* Do nothing now */
	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
	return 0;
}
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
/*
 * Walk one physical log record and dispatch every log operation in it.
 *
 * Each op header carries a transaction id; ops are matched to in-memory
 * recovery transactions via a hash on that tid (rhash[]).  A start op
 * creates a new transaction; later ops for a known tid append data,
 * continue it across record boundaries, commit it (replaying all of its
 * items for the given pass) or mark an unmount record.  Ops for an
 * unknown tid without the start flag are silently skipped.
 *
 * Returns 0 on success, or a positive errno on corruption or replay
 * failure.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	int			pass)
{
	xfs_caddr_t		lp;
	int			num_logops;
	xlog_op_header_t	*ohead;
	xlog_recover_t		*trans;
	xlog_tid_t		tid;
	int			error;
	unsigned long		hash;
	uint			flags;

	lp = dp + be32_to_cpu(rhead->h_len);	/* one past the record payload */
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - oops */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return (XFS_ERROR(EIO));

	while ((dp < lp) && num_logops) {
		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
		ohead = (xlog_op_header_t *)dp;
		dp += sizeof(xlog_op_header_t);
		if (ohead->oh_clientid != XFS_TRANSACTION &&
		    ohead->oh_clientid != XFS_LOG) {
			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
					__func__, ohead->oh_clientid);
			ASSERT(0);
			return (XFS_ERROR(EIO));
		}
		tid = be32_to_cpu(ohead->oh_tid);
		hash = XLOG_RHASH(tid);
		trans = xlog_recover_find_tid(&rhash[hash], tid);
		if (trans == NULL) {		/* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,
					be64_to_cpu(rhead->h_lsn));
		} else {
			/* op payload must fit inside this record */
			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
				xfs_warn(log->l_mp, "%s: bad length 0x%x",
					__func__, be32_to_cpu(ohead->oh_len));
				WARN_ON(1);
				return (XFS_ERROR(EIO));
			}
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
			if (flags & XLOG_WAS_CONT_TRANS)
				flags &= ~XLOG_CONTINUE_TRANS;
			switch (flags) {
			case XLOG_COMMIT_TRANS:
				error = xlog_recover_commit_trans(log,
								trans, pass);
				break;
			case XLOG_UNMOUNT_TRANS:
				error = xlog_recover_unmount_trans(log, trans);
				break;
			case XLOG_WAS_CONT_TRANS:
				error = xlog_recover_add_to_cont_trans(log,
						trans, dp,
						be32_to_cpu(ohead->oh_len));
				break;
			case XLOG_START_TRANS:
				/* a start op for a tid we already track */
				xfs_warn(log->l_mp, "%s: bad transaction",
					__func__);
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			case 0:
			case XLOG_CONTINUE_TRANS:
				error = xlog_recover_add_to_trans(log, trans,
						dp, be32_to_cpu(ohead->oh_len));
				break;
			default:
				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
					__func__, flags);
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			}
			if (error)
				return error;
		}
		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
3336
3337
3338
3339
3340
/*
 * Process an extent free intent (EFI) item that was recovered from the
 * log.  We need to free the extents that it describes.
 */
STATIC int
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	int			error = 0;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));

	/*
	 * First check the validity of the extents described by the
	 * EFI.  If any are bad, then assume that all are bad and
	 * just toss the EFI.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
			xfs_efi_release(efip, efip->efi_format.efi_nextents);
			return XFS_ERROR(EIO);
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
	if (error)
		goto abort_error;
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	/* free each extent and log a matching EFD entry for it */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
		if (error)
			goto abort_error;
		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
					 extp->ext_len);
	}

	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
	error = xfs_trans_commit(tp, 0);
	return error;

abort_error:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
/*
 * When this is called, all of the EFIs which did not have corresponding
 * EFDs should be in the AIL.  What we do now is free the extents
 * associated with each one.
 *
 * Since we process the EFIs in normal transactions, they will be
 * removed at some point after the commit.  This prevents us from just
 * walking down the list processing each one.  We mark processed EFIs
 * with the XFS_EFI_RECOVERED flag so they are skipped on later passes
 * of the loop.
 */
STATIC int
xlog_recover_process_efis(
	struct xlog	*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 * There should be no EFIs left in the AIL now.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
			lip = xfs_trans_ail_cursor_next(ailp, &cur);
			continue;
		}

		/* drop the AIL lock across the transaction-based replay */
		spin_unlock(&ailp->xa_lock);
		error = xlog_recover_process_efi(log->l_mp, efip);
		spin_lock(&ailp->xa_lock);
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
3467
3468
3469
3470
3471
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an AGI unlinked inode hash bucket.  Failure is only logged; the
 * caller continues regardless (losing the rest of that bucket's chain).
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
				  0, 0, 0);
	if (error)
		goto out_abort;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	/* terminate the bucket's unlinked chain and log just that slot */
	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
3512
/*
 * Pull one inode off an AGI unlinked-list bucket: look it up with
 * xfs_iget, read its on-disk copy to capture di_next_unlinked, and
 * return that as the next agino in the chain.  DMAPI events for the
 * final IRELE are suppressed via di_dmevmask.
 *
 * On any failure the whole bucket is clamped to NULLAGINO (ditching
 * the remaining chain) and NULLAGINO is returned to stop the walk.
 */
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	int			bucket)
{
	struct xfs_buf		*ibp;
	struct xfs_dinode	*dip;
	struct xfs_inode	*ip;
	xfs_ino_t		ino;
	int			error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
/*
 * This is called during recovery to process any inodes which we
 * unlinked but not freed when the system crashed.  These inodes are on
 * the unlinked lists in the AGI blocks: scan every AGI bucket and walk
 * each chain via xlog_recover_process_one_iunlink() until it reaches
 * NULLAGINO.  DMAPI events are suppressed for the duration by zeroing
 * mp->m_dmevmask.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is borked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the AGs we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * buffer reference though, so that it stays pinned in memory
		 * while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651STATIC int
3652xlog_unpack_data_crc(
3653 struct xlog_rec_header *rhead,
3654 xfs_caddr_t dp,
3655 struct xlog *log)
3656{
3657 __le32 crc;
3658
3659 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3660 if (crc != rhead->h_crc) {
3661 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3662 xfs_alert(log->l_mp,
3663 "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3664 le32_to_cpu(rhead->h_crc),
3665 le32_to_cpu(crc));
3666 xfs_hex_dump(dp, 32);
3667 }
3668
3669
3670
3671
3672
3673
3674 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3675 return EFSCORRUPTED;
3676 }
3677
3678 return 0;
3679}
3680
/*
 * Restore a log record's payload to its on-disk format after reading.
 *
 * When a record is written, the first __be32 of every basic block of
 * payload is replaced by the record's cycle number; the displaced words
 * are stashed in the record header (h_cycle_data covers the first
 * XLOG_HEADER_CYCLE_SIZE worth of blocks, with v2 logs spilling the
 * rest into extended headers).  Validate the CRC, then put the original
 * words back.
 */
STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	int			i, j, k;
	int			error;

	error = xlog_unpack_data_crc(rhead, dp, log);
	if (error)
		return error;

	/* blocks covered by the primary header's cycle data */
	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	/* v2 logs carry further cycle data in extended headers */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			/* j = extended header index, k = slot within it */
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}
3712
/*
 * Sanity check a log record header before trusting it: magic number,
 * version bits, payload length and block number must all be plausible.
 * Returns 0 if valid, EFSCORRUPTED or EIO otherwise.
 */
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return XFS_ERROR(EIO);
	}

	/*
	 * A log record must have a payload.
	 * NOTE(review): hlen is an int, so "hlen > INT_MAX" can never be
	 * true — the effective check is hlen <= 0, which also catches a
	 * huge unsigned h_len wrapping negative.
	 */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
3748
3749
3750
3751
3752
3753
3754
3755
3756
/*
 * Read the log from tail to head and process the log records found.
 * Two cases are handled: when the active range is contiguous
 * (tail_blk <= head_blk), and when it wraps around the physical end of
 * the log, in which case both record headers and record data may be
 * split across the wrap point and must be read in two pieces.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size
	 * from h_size.  Use this to tell how many sectors make up the log
	 * header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = be32_to_cpu(rhead->h_size);
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		/* v1 logs use a single 512 byte sector record header */
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk <= head_blk) {
		/* simple case: the active log range does not wrap */
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			/* blocks in data section */
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log,
						rhash, rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical
			 * end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header
				 *   end) - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header
				 *   end) - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;

		/* read first part of physical log */
		while (blk_no < head_blk) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);
	return error;
}
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022STATIC int
4023xlog_do_log_recovery(
4024 struct xlog *log,
4025 xfs_daddr_t head_blk,
4026 xfs_daddr_t tail_blk)
4027{
4028 int error, i;
4029
4030 ASSERT(head_blk != tail_blk);
4031
4032
4033
4034
4035
4036 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4037 sizeof(struct list_head),
4038 KM_SLEEP);
4039 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4040 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4041
4042 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4043 XLOG_RECOVER_PASS1);
4044 if (error != 0) {
4045 kmem_free(log->l_buf_cancel_table);
4046 log->l_buf_cancel_table = NULL;
4047 return error;
4048 }
4049
4050
4051
4052
4053 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4054 XLOG_RECOVER_PASS2);
4055#ifdef DEBUG
4056 if (!error) {
4057 int i;
4058
4059 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4060 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4061 }
4062#endif
4063
4064 kmem_free(log->l_buf_cancel_table);
4065 log->l_buf_cancel_table = NULL;
4066
4067 return error;
4068}
4069
4070
4071
4072
/*
 * Do the recovery of the log: replay the log images, update the tail
 * lsn, re-read the superblock from disk and re-initialise the in-core
 * counters, then clear the ACTIVE_RECOVERY flag so normal transactions
 * can proceed.
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return (EIO);
	}

	/*
	 * We now update the tail_lsn since much of the recovery has
	 * completed and there may be space available to use.  Items still
	 * pinned in the AIL (e.g. from extent frees or iunlinks) keep the
	 * tail where they require it; otherwise the whole log is freed up.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode updates,
	 * re-read the superblock buffer synchronously.  The flag dance
	 * below converts the cached sb buffer into a fresh read request.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	ASSERT(!(XFS_BUF_ISWRITE(bp)));
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);
	bp->b_ops = &xfs_sb_buf_ops;
	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_buf_ioerror_alert(bp, __func__);
		ASSERT(0);
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(xfs_sb_good_version(sbp));
	xfs_buf_relse(bp);

	/* We've re-read the superblock so re-initialize per-cpu counters */
	xfs_icsb_reinit_counters(log->l_mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
4143
4144
4145
4146
4147
4148
/*
 * Perform recovery of the log if it is dirty (tail != head).  Refuses
 * to touch a dirty log on a read-only device, and rejects v5 logs that
 * carry unknown incompatible log features before modifying anything.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/*
		 * The log is dirty.  Recovery proceeds unless the device
		 * itself is read-only, in which case we must fail here.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation.  We know
		 * the log is dirty, so reject recovery outright if there are
		 * unknown incompatible log features, before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.\n"
"The log can not be fully and/or safely recovered by this kernel.\n"
"Please recover the log on a kernel that supports the unknown features.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			return EINVAL;
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
/*
 * Second stage of log recovery: process the extent free intents built
 * up by the first stage and clean up the on-disk unlinked inode lists.
 * This is separated from the first part of recovery so that the root
 * and real-time bitmap inodes can be read in from disk between the two
 * stages.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the rest of
	 * recovery.  Start with completing all the extent free intent
	 * records and then process the unlinked inode lists.  At this
	 * point, we essentially run in normal mode except that we're
	 * still in recovery mode.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;
		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}
		/*
		 * Sync the log to get all the EFIs out of the AIL.  This
		 * isn't absolutely necessary, but it helps in case the
		 * unlink transactions would have problems pushing the EFIs
		 * out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
4254
4255
4256#if defined(DEBUG)
4257
4258
4259
4260
/*
 * DEBUG-only sanity pass after recovery: read every AG's AGF and AGI
 * and accumulate filesystem-wide totals of free blocks, inode count and
 * free inodes.
 *
 * NOTE(review): the accumulated totals (freeblks, itotal, ifree) are
 * never compared against the superblock here — as written this routine
 * only exercises the AGF/AGI reads and logs read failures.
 */
void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			/* free space = free blocks + free-list blocks */
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
4305#endif
4306