/*
 * Online repair of allocation group headers: secondary superblocks,
 * AGF, AGFL, and AGI.
 *
 * NOTE(review): the original license/copyright header was garbled during
 * extraction; restore it from the upstream source before committing.
 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "xfs_log_format.h"
14#include "xfs_trans.h"
15#include "xfs_sb.h"
16#include "xfs_alloc.h"
17#include "xfs_alloc_btree.h"
18#include "xfs_ialloc.h"
19#include "xfs_ialloc_btree.h"
20#include "xfs_rmap.h"
21#include "xfs_rmap_btree.h"
22#include "xfs_refcount_btree.h"
23#include "xfs_ag.h"
24#include "scrub/scrub.h"
25#include "scrub/common.h"
26#include "scrub/trace.h"
27#include "scrub/repair.h"
28#include "scrub/bitmap.h"
29
30
31
32
33int
34xrep_superblock(
35 struct xfs_scrub *sc)
36{
37 struct xfs_mount *mp = sc->mp;
38 struct xfs_buf *bp;
39 xfs_agnumber_t agno;
40 int error;
41
42
43 agno = sc->sm->sm_agno;
44 if (agno == 0)
45 return -EOPNOTSUPP;
46
47 error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
48 if (error)
49 return error;
50
51
52 xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
53 xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
54
55
56 xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
57 xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
58 return error;
59}
60
61
62
/* Context for accumulating free-space data while walking the bnobt. */
struct xrep_agf_allocbt {
	struct xfs_scrub	*sc;
	xfs_agblock_t		freeblks;	/* total free blocks seen */
	xfs_agblock_t		longest;	/* longest free extent seen */
};
68
69
70STATIC int
71xrep_agf_walk_allocbt(
72 struct xfs_btree_cur *cur,
73 const struct xfs_alloc_rec_incore *rec,
74 void *priv)
75{
76 struct xrep_agf_allocbt *raa = priv;
77 int error = 0;
78
79 if (xchk_should_terminate(raa->sc, &error))
80 return error;
81
82 raa->freeblks += rec->ar_blockcount;
83 if (rec->ar_blockcount > raa->longest)
84 raa->longest = rec->ar_blockcount;
85 return error;
86}
87
88
89STATIC int
90xrep_agf_check_agfl_block(
91 struct xfs_mount *mp,
92 xfs_agblock_t agbno,
93 void *priv)
94{
95 struct xfs_scrub *sc = priv;
96
97 if (!xfs_verify_agbno(mp, sc->sa.pag->pag_agno, agbno))
98 return -EFSCORRUPTED;
99 return 0;
100}
101
102
103
104
105
/* Index of each btree root we must rediscover to repair the AGF. */
enum {
	XREP_AGF_BNOBT = 0,	/* free space by block */
	XREP_AGF_CNTBT,		/* free space by size */
	XREP_AGF_RMAPBT,	/* reverse mappings */
	XREP_AGF_REFCOUNTBT,	/* reference counts (reflink only) */
	XREP_AGF_END,		/* NULL-terminator slot for the fab array */
	XREP_AGF_MAX
};
114
115
116static inline bool
117xrep_check_btree_root(
118 struct xfs_scrub *sc,
119 struct xrep_find_ag_btree *fab)
120{
121 struct xfs_mount *mp = sc->mp;
122 xfs_agnumber_t agno = sc->sm->sm_agno;
123
124 return xfs_verify_agbno(mp, agno, fab->root) &&
125 fab->height <= XFS_BTREE_MAXLEVELS;
126}
127
128
129
130
131
132
133
134
135
136
/*
 * Find the btree roots needed to rebuild the AGF.  The rmapbt is used to
 * locate the roots, but the rmapbt root itself is recorded in the (corrupt)
 * AGF, so we only trust the result if it matches what the old AGF recorded.
 *
 * Returns 0 with the roots filled into @fab, or -EFSCORRUPTED if any
 * required root cannot be found or fails sanity checks.
 */
STATIC int
xrep_agf_find_btrees(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*fab,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_agf			*old_agf = agf_bp->b_addr;
	int				error;

	/* Go find the root data. */
	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
	if (error)
		return error;

	/* We must find the bnobt, cntbt, and rmapbt roots. */
	if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
	    !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
	    !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
		return -EFSCORRUPTED;

	/*
	 * We relied on the rmapbt to reconstruct the AGF, so the rediscovered
	 * rmapbt root must match the one the old AGF recorded; otherwise
	 * something is seriously wrong.
	 */
	if (fab[XREP_AGF_RMAPBT].root !=
	    be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
		return -EFSCORRUPTED;

	/* We must find the refcountbt root if that feature is enabled. */
	if (xfs_has_reflink(sc->mp) &&
	    !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
		return -EFSCORRUPTED;

	return 0;
}
173
174
175
176
177
/*
 * Reinitialize the AGF header, making an in-core copy of the old contents
 * first so that the caller can revert, and so we know which incore state
 * needs reinitializing.
 */
STATIC void
xrep_agf_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	struct xfs_agf		*old_agf)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf = agf_bp->b_addr;

	/* Save the old header, then rewrite from scratch. */
	memcpy(old_agf, agf, sizeof(*old_agf));
	memset(agf, 0, BBTOB(agf_bp->b_length));
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
	agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp,
						sc->sa.pag->pag_agno));
	/* Preserve the AGFL pointers; the AGFL is repaired separately. */
	agf->agf_flfirst = old_agf->agf_flfirst;
	agf->agf_fllast = old_agf->agf_fllast;
	agf->agf_flcount = old_agf->agf_flcount;
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* Mark the incore AGF data stale until we're done fixing things. */
	ASSERT(sc->sa.pag->pagf_init);
	sc->sa.pag->pagf_init = 0;
}
204
205
206STATIC void
207xrep_agf_set_roots(
208 struct xfs_scrub *sc,
209 struct xfs_agf *agf,
210 struct xrep_find_ag_btree *fab)
211{
212 agf->agf_roots[XFS_BTNUM_BNOi] =
213 cpu_to_be32(fab[XREP_AGF_BNOBT].root);
214 agf->agf_levels[XFS_BTNUM_BNOi] =
215 cpu_to_be32(fab[XREP_AGF_BNOBT].height);
216
217 agf->agf_roots[XFS_BTNUM_CNTi] =
218 cpu_to_be32(fab[XREP_AGF_CNTBT].root);
219 agf->agf_levels[XFS_BTNUM_CNTi] =
220 cpu_to_be32(fab[XREP_AGF_CNTBT].height);
221
222 agf->agf_roots[XFS_BTNUM_RMAPi] =
223 cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
224 agf->agf_levels[XFS_BTNUM_RMAPi] =
225 cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
226
227 if (xfs_has_reflink(sc->mp)) {
228 agf->agf_refcount_root =
229 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
230 agf->agf_refcount_level =
231 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
232 }
233}
234
235
/* Update all AGF fields which derive from btree contents. */
STATIC int
xrep_agf_calc_from_btrees(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp)
{
	struct xrep_agf_allocbt	raa = { .sc = sc };
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_agf		*agf = agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		btreeblks;
	xfs_agblock_t		blocks;
	int			error;

	/* Update the AGF counters from the bnobt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
	if (error)
		goto err;
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	btreeblks = blocks - 1;		/* don't count the root block */
	agf->agf_freeblks = cpu_to_be32(raa.freeblks);
	agf->agf_longest = cpu_to_be32(raa.longest);

	/* Update the AGF counters from the cntbt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_CNT);
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	btreeblks += blocks - 1;	/* don't count the root block */

	/* Update the AGF counters from the rmapbt. */
	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	agf->agf_rmap_blocks = cpu_to_be32(blocks);
	btreeblks += blocks - 1;	/* don't count the root block */

	agf->agf_btreeblks = cpu_to_be32(btreeblks);

	/* Update the AGF counters from the refcountbt. */
	if (xfs_has_reflink(mp)) {
		cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
				sc->sa.pag);
		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		xfs_btree_del_cursor(cur, error);
		agf->agf_refcount_blocks = cpu_to_be32(blocks);
	}

	return 0;
err:
	/* Tear down whichever cursor was live when the error hit. */
	xfs_btree_del_cursor(cur, error);
	return error;
}
299
300
/* Commit the new AGF and reinitialize the incore per-AG state. */
STATIC int
xrep_agf_commit_new(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp)
{
	struct xfs_perag	*pag;
	struct xfs_agf		*agf = agf_bp->b_addr;

	/* Trigger fdblocks recalculation */
	xfs_force_summary_recalc(sc->mp);

	/* Write this to disk. */
	xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
	xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);

	/* Now reinitialize the in-core counters we changed. */
	pag = sc->sa.pag;
	pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
	pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
	pag->pagf_longest = be32_to_cpu(agf->agf_longest);
	pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
	pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
	pag->pagf_levels[XFS_BTNUM_RMAPi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
	pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
	pag->pagf_init = 1;

	return 0;
}
332
333
/* Repair the AGF.  Requires the rmapbt, so v5 feature sets only. */
int
xrep_agf(
	struct xfs_scrub		*sc)
{
	/* Btree roots to rediscover; XREP_AGF_END terminates the list. */
	struct xrep_find_ag_btree	fab[XREP_AGF_MAX] = {
		[XREP_AGF_BNOBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_bnobt_buf_ops,
		},
		[XREP_AGF_CNTBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_cntbt_buf_ops,
		},
		[XREP_AGF_RMAPBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_rmapbt_buf_ops,
		},
		[XREP_AGF_REFCOUNTBT] = {
			.rmap_owner = XFS_RMAP_OWN_REFC,
			.buf_ops = &xfs_refcountbt_buf_ops,
		},
		[XREP_AGF_END] = {
			.buf_ops = NULL,
		},
	};
	struct xfs_agf			old_agf;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*agf_bp;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	int				error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_has_rmapbt(mp))
		return -EOPNOTSUPP;

	/*
	 * Read the AGF buffer raw (NULL ops, so no verifier) since it may be
	 * too corrupt to pass verification; install the real verifier once
	 * the read succeeds.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
						XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
	if (error)
		return error;
	agf_bp->b_ops = &xfs_agf_buf_ops;
	agf = agf_bp->b_addr;

	/*
	 * Load the AGFL so that we can screen out OWN_AG blocks that are on
	 * the AGFL now; these blocks might once have been part of the
	 * bno/cnt/rmap btrees but are not now.  Since the AGF is corrupt we
	 * have to trust the AGFL contents here — there is nothing sound to
	 * cross-reference them against — but obviously bad contents make us
	 * bail out below.
	 */
	error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.pag->pag_agno, &agfl_bp);
	if (error)
		return error;

	/*
	 * Spot-check the AGFL block numbers; if any lie outside this AG
	 * there's nothing we can do but bail out.
	 */
	error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
			xrep_agf_check_agfl_block, sc);
	if (error)
		return error;

	/*
	 * Find the AGF btree roots.  See xrep_agf_find_btrees for how we
	 * cope with the fact that the roots are recorded in the very header
	 * being repaired.
	 */
	error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
	if (error)
		return error;

	/* Start rewriting the header and implant the btrees we found. */
	xrep_agf_init_header(sc, agf_bp, &old_agf);
	xrep_agf_set_roots(sc, agf, fab);
	error = xrep_agf_calc_from_btrees(sc, agf_bp);
	if (error)
		goto out_revert;

	/* Commit the changes and reinitialize incore state. */
	return xrep_agf_commit_new(sc, agf_bp);

out_revert:
	/* Mark the incore AGF state stale and put back the old AGF. */
	sc->sa.pag->pagf_init = 0;
	memcpy(agf, &old_agf, sizeof(old_agf));
	return error;
}
429
430
431
/* Context for reconstructing the AGFL. */
struct xrep_agfl {
	/* Blocks owned by the bnobt/cntbt/rmapbt btrees themselves. */
	struct xbitmap		agmetablocks;

	/* All OWN_AG blocks found in the rmapbt (aliases the caller's
	 * agfl_extents bitmap). */
	struct xbitmap		*freesp;

	struct xfs_scrub	*sc;
};
441
442
/* Record all OWN_AG (free space btree) information from the rmap data. */
STATIC int
xrep_agfl_walk_rmap(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xrep_agfl	*ra = priv;
	xfs_fsblock_t		fsb;
	int			error = 0;

	if (xchk_should_terminate(ra->sc, &error))
		return error;

	/* Record all the OWN_AG blocks. */
	if (rec->rm_owner == XFS_RMAP_OWN_AG) {
		fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
				rec->rm_startblock);
		error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
		if (error)
			return error;
	}

	/* Record the rmapbt blocks on the cursor's current path as well. */
	return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
}
467
468
469
470
471
472
473
474
475
476
/*
 * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
 * which blocks belong to the AGFL.
 *
 * Compute the set of old AGFL blocks by subtracting, from the list of all
 * OWN_AG blocks, the blocks owned by the OWN_AG btrees (bnobt, cntbt,
 * rmapbt).  Whatever remains should be the old AGFL blocks; return them in
 * @agfl_extents along with the number of blocks (@flcount) we will actually
 * put back on the AGFL.
 */
STATIC int
xrep_agfl_collect_blocks(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	struct xbitmap		*agfl_extents,
	xfs_agblock_t		*flcount)
{
	struct xrep_agfl	ra;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_btree_cur	*cur;
	int			error;

	ra.sc = sc;
	ra.freesp = agfl_extents;
	xbitmap_init(&ra.agmetablocks);

	/* Find all OWN_AG space and the rmapbt's own blocks. */
	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);

	/* Find all blocks currently being used by the bnobt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);

	/* Find all blocks currently being used by the cntbt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_CNT);
	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
	if (error)
		goto err;

	xfs_btree_del_cursor(cur, error);

	/*
	 * Drop the btree-owned blocks from the OWN_AG set; the remaining
	 * blocks should be the old AGFL contents.
	 */
	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
	xbitmap_destroy(&ra.agmetablocks);
	if (error)
		return error;

	/*
	 * Calculate the new AGFL size.  If we found more blocks than fit in
	 * the AGFL, the excess will be freed later.
	 */
	*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
			 xfs_agfl_size(mp));
	return 0;

err:
	xbitmap_destroy(&ra.agmetablocks);
	xfs_btree_del_cursor(cur, error);
	return error;
}
539
540
/* Update the AGF's free-list fields and reset the in-core flcount. */
STATIC void
xrep_agfl_update_agf(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	xfs_agblock_t		flcount)
{
	struct xfs_agf		*agf = agf_bp->b_addr;

	ASSERT(flcount <= xfs_agfl_size(sc->mp));

	/* Trigger fdblocks recalculation */
	xfs_force_summary_recalc(sc->mp);

	/* Update the AGF counters. */
	if (sc->sa.pag->pagf_init)
		sc->sa.pag->pagf_flcount = flcount;
	agf->agf_flfirst = cpu_to_be32(0);
	agf->agf_flcount = cpu_to_be32(flcount);
	/*
	 * NOTE(review): when flcount == 0 this wraps fllast to 0xFFFFFFFF,
	 * which appears to be the intended empty-AGFL convention — confirm
	 * against the AGFL reader before changing.
	 */
	agf->agf_fllast = cpu_to_be32(flcount - 1);

	xfs_alloc_log_agf(sc->tp, agf_bp,
			XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
}
564
565
/* Write out a totally new AGFL from the collected extents. */
STATIC void
xrep_agfl_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agfl_bp,
	struct xbitmap		*agfl_extents,
	xfs_agblock_t		flcount)
{
	struct xfs_mount	*mp = sc->mp;
	__be32			*agfl_bno;
	struct xbitmap_range	*br;
	struct xbitmap_range	*n;
	struct xfs_agfl		*agfl;
	xfs_agblock_t		agbno;
	unsigned int		fl_off;

	ASSERT(flcount <= xfs_agfl_size(mp));

	/*
	 * Start rewriting the header by filling the whole buffer with 0xFF
	 * (i.e. every bno[] slot becomes NULLAGBLOCK), then setting the AGFL
	 * header fields.
	 */
	agfl = XFS_BUF_TO_AGFL(agfl_bp);
	memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
	agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
	agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
	uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);

	/*
	 * Fill the AGFL with the first @flcount collected blocks.  Blocks
	 * consumed here are removed from @agfl_extents as we go, so whatever
	 * remains in the bitmap afterwards is surplus for the caller to free.
	 */
	fl_off = 0;
	agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
	for_each_xbitmap_extent(br, n, agfl_extents) {
		agbno = XFS_FSB_TO_AGBNO(mp, br->start);

		trace_xrep_agfl_insert(mp, sc->sa.pag->pag_agno, agbno,
				br->len);

		while (br->len > 0 && fl_off < flcount) {
			agfl_bno[fl_off] = cpu_to_be32(agbno);
			fl_off++;
			agbno++;

			/*
			 * This block went onto the AGFL, so shrink the front
			 * of the extent so it won't be handed back later.
			 */
			br->start++;
			br->len--;
		}

		/* AGFL full before the extent ran out?  Stop here. */
		if (br->len)
			break;
		/* Extent fully consumed; drop it from the bitmap. */
		list_del(&br->list);
		kmem_free(br);
	}

	/* Write new AGFL to disk. */
	xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
}
629
630
/* Repair the AGFL.  Requires the rmapbt. */
int
xrep_agfl(
	struct xfs_scrub	*sc)
{
	struct xbitmap		agfl_extents;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agfl_bp;
	xfs_agblock_t		flcount;
	int			error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_has_rmapbt(mp))
		return -EOPNOTSUPP;

	xbitmap_init(&agfl_extents);

	/*
	 * Read the AGF so that we can query the rmapbt.  We hope that
	 * there's nothing wrong with the AGF, but all the AG header repair
	 * functions have this chicken-and-egg problem.
	 */
	error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.pag->pag_agno, 0,
			&agf_bp);
	if (error)
		return error;

	/*
	 * Read the AGFL buffer raw (NULL ops, so no verifier) since it may
	 * be too corrupt to pass verification; install the real verifier
	 * once the read succeeds.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
						XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
	if (error)
		return error;
	agfl_bp->b_ops = &xfs_agfl_buf_ops;

	/* Deduce the AGFL contents from the rmapbt and free space btrees. */
	error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
	if (error)
		goto err;

	/* Update the AGF counters, then write out the new AGFL contents. */
	xrep_agfl_update_agf(sc, agf_bp, flcount);
	xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);

	/*
	 * Roll the transaction to make the new AGFL permanent before we
	 * start using it to return leftover blocks to the free space btrees.
	 * Stash the buffers so they stay locked across the roll.
	 */
	sc->sa.agf_bp = agf_bp;
	sc->sa.agfl_bp = agfl_bp;
	error = xrep_roll_ag_trans(sc);
	if (error)
		goto err;

	/* Dump any blocks that didn't fit on the AGFL. */
	error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
			XFS_AG_RESV_AGFL);
err:
	xbitmap_destroy(&agfl_extents);
	return error;
}
701
702
703
704
705
706
707
/* Index of each btree root we must rediscover to repair the AGI. */
enum {
	XREP_AGI_INOBT = 0,	/* inode btree */
	XREP_AGI_FINOBT,	/* free inode btree */
	XREP_AGI_END,		/* NULL-terminator slot for the fab array */
	XREP_AGI_MAX
};
714
715
716
717
718
/*
 * Find the inode btree roots described by @fab, sanity-check them, and
 * return the root data via @fab.  The roots are located through the rmapbt,
 * which is reached via the AGF.
 */
STATIC int
xrep_agi_find_btrees(
	struct xfs_scrub		*sc,
	struct xrep_find_ag_btree	*fab)
{
	struct xfs_buf			*agf_bp;
	struct xfs_mount		*mp = sc->mp;
	int				error;

	/* Read the AGF so that the root finder can query the rmapbt. */
	error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.pag->pag_agno, 0,
			&agf_bp);
	if (error)
		return error;

	/* Find the btree roots. */
	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
	if (error)
		return error;

	/* We must find the inobt root. */
	if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
		return -EFSCORRUPTED;

	/* We must find the finobt root if that feature is enabled. */
	if (xfs_has_finobt(mp) &&
	    !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
		return -EFSCORRUPTED;

	return 0;
}
750
751
752
753
754
/*
 * Reinitialize the AGI header, making an in-core copy of the old contents
 * first so that the caller can revert, and so we know which incore state
 * needs reinitializing.
 */
STATIC void
xrep_agi_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp,
	struct xfs_agi		*old_agi)
{
	struct xfs_agi		*agi = agi_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;

	/* Save the old header, then rewrite from scratch. */
	memcpy(old_agi, agi, sizeof(*old_agi));
	memset(agi, 0, BBTOB(agi_bp->b_length));
	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
	agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp,
						sc->sa.pag->pag_agno));
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

	/* We don't know how to fix the unlinked list yet, so keep it. */
	memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
			sizeof(agi->agi_unlinked));

	/* Mark the incore AGI data stale until we're done fixing things. */
	ASSERT(sc->sa.pag->pagi_init);
	sc->sa.pag->pagi_init = 0;
}
784
785
786STATIC void
787xrep_agi_set_roots(
788 struct xfs_scrub *sc,
789 struct xfs_agi *agi,
790 struct xrep_find_ag_btree *fab)
791{
792 agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
793 agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
794
795 if (xfs_has_finobt(sc->mp)) {
796 agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
797 agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
798 }
799}
800
801
/* Update the AGI counters from the inode btrees. */
STATIC int
xrep_agi_calc_from_btrees(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agi_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agino_t		count;
	xfs_agino_t		freecount;
	int			error;

	/* Count inodes (and, if enabled, inobt blocks) from the inobt. */
	cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
			sc->sa.pag, XFS_BTNUM_INO);
	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
	if (error)
		goto err;
	if (xfs_has_inobtcounts(mp)) {
		xfs_agblock_t	blocks;

		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		agi->agi_iblocks = cpu_to_be32(blocks);
	}
	xfs_btree_del_cursor(cur, error);

	agi->agi_count = cpu_to_be32(count);
	agi->agi_freecount = cpu_to_be32(freecount);

	/* Count the finobt blocks if both relevant features are enabled. */
	if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
		xfs_agblock_t	blocks;

		cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
				sc->sa.pag, XFS_BTNUM_FINO);
		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		xfs_btree_del_cursor(cur, error);
		agi->agi_fblocks = cpu_to_be32(blocks);
	}

	return 0;
err:
	/* Tear down whichever cursor was live when the error hit. */
	xfs_btree_del_cursor(cur, error);
	return error;
}
849
850
/* Commit the new AGI and reinitialize the incore per-AG state. */
STATIC int
xrep_agi_commit_new(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp)
{
	struct xfs_perag	*pag;
	struct xfs_agi		*agi = agi_bp->b_addr;

	/* Trigger inode count recalculation */
	xfs_force_summary_recalc(sc->mp);

	/* Write this to disk. */
	xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
	xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);

	/* Now reinitialize the in-core counters we changed. */
	pag = sc->sa.pag;
	pag->pagi_count = be32_to_cpu(agi->agi_count);
	pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
	pag->pagi_init = 1;

	return 0;
}
874
875
/* Repair the AGI.  Requires the rmapbt. */
int
xrep_agi(
	struct xfs_scrub		*sc)
{
	/* Btree roots to rediscover; XREP_AGI_END terminates the list. */
	struct xrep_find_ag_btree	fab[XREP_AGI_MAX] = {
		[XREP_AGI_INOBT] = {
			.rmap_owner = XFS_RMAP_OWN_INOBT,
			.buf_ops = &xfs_inobt_buf_ops,
		},
		[XREP_AGI_FINOBT] = {
			.rmap_owner = XFS_RMAP_OWN_INOBT,
			.buf_ops = &xfs_finobt_buf_ops,
		},
		[XREP_AGI_END] = {
			.buf_ops = NULL
		},
	};
	struct xfs_agi			old_agi;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*agi_bp;
	struct xfs_agi			*agi;
	int				error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_has_rmapbt(mp))
		return -EOPNOTSUPP;

	/*
	 * Read the AGI buffer raw (NULL ops, so no verifier) since it may be
	 * too corrupt to pass verification; install the real verifier once
	 * the read succeeds.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
						XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
	if (error)
		return error;
	agi_bp->b_ops = &xfs_agi_buf_ops;
	agi = agi_bp->b_addr;

	/* Find the AGI btree roots. */
	error = xrep_agi_find_btrees(sc, fab);
	if (error)
		return error;

	/* Start rewriting the header and implant the btrees we found. */
	xrep_agi_init_header(sc, agi_bp, &old_agi);
	xrep_agi_set_roots(sc, agi, fab);
	error = xrep_agi_calc_from_btrees(sc, agi_bp);
	if (error)
		goto out_revert;

	/* Commit the changes and reinitialize incore state. */
	return xrep_agi_commit_new(sc, agi_bp);

out_revert:
	/* Mark the incore AGI state stale and put back the old AGI. */
	sc->sa.pag->pagi_init = 0;
	memcpy(agi, &old_agi, sizeof(old_agi));
	return error;
}
937