// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "xfs_alloc.h"
14#include "xfs_rmap.h"
15#include "scrub/scrub.h"
16#include "scrub/common.h"
17#include "scrub/btree.h"
18#include "xfs_ag.h"
19
20
21
22
23int
24xchk_setup_ag_allocbt(
25 struct xfs_scrub *sc)
26{
27 return xchk_setup_ag_btree(sc, false);
28}
29
30
31
32
33
34
35STATIC void
36xchk_allocbt_xref_other(
37 struct xfs_scrub *sc,
38 xfs_agblock_t agbno,
39 xfs_extlen_t len)
40{
41 struct xfs_btree_cur **pcur;
42 xfs_agblock_t fbno;
43 xfs_extlen_t flen;
44 int has_otherrec;
45 int error;
46
47 if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
48 pcur = &sc->sa.cnt_cur;
49 else
50 pcur = &sc->sa.bno_cur;
51 if (!*pcur || xchk_skip_xref(sc->sm))
52 return;
53
54 error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
55 if (!xchk_should_check_xref(sc, &error, pcur))
56 return;
57 if (!has_otherrec) {
58 xchk_btree_xref_set_corrupt(sc, *pcur, 0);
59 return;
60 }
61
62 error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
63 if (!xchk_should_check_xref(sc, &error, pcur))
64 return;
65 if (!has_otherrec) {
66 xchk_btree_xref_set_corrupt(sc, *pcur, 0);
67 return;
68 }
69
70 if (fbno != agbno || flen != len)
71 xchk_btree_xref_set_corrupt(sc, *pcur, 0);
72}
73
74
75STATIC void
76xchk_allocbt_xref(
77 struct xfs_scrub *sc,
78 xfs_agblock_t agbno,
79 xfs_extlen_t len)
80{
81 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
82 return;
83
84 xchk_allocbt_xref_other(sc, agbno, len);
85 xchk_xref_is_not_inode_chunk(sc, agbno, len);
86 xchk_xref_has_no_owner(sc, agbno, len);
87 xchk_xref_is_not_shared(sc, agbno, len);
88}
89
90
91STATIC int
92xchk_allocbt_rec(
93 struct xchk_btree *bs,
94 const union xfs_btree_rec *rec)
95{
96 struct xfs_mount *mp = bs->cur->bc_mp;
97 xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
98 xfs_agblock_t bno;
99 xfs_extlen_t len;
100
101 bno = be32_to_cpu(rec->alloc.ar_startblock);
102 len = be32_to_cpu(rec->alloc.ar_blockcount);
103
104 if (bno + len <= bno ||
105 !xfs_verify_agbno(mp, agno, bno) ||
106 !xfs_verify_agbno(mp, agno, bno + len - 1))
107 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
108
109 xchk_allocbt_xref(bs->sc, bno, len);
110
111 return 0;
112}
113
114
115STATIC int
116xchk_allocbt(
117 struct xfs_scrub *sc,
118 xfs_btnum_t which)
119{
120 struct xfs_btree_cur *cur;
121
122 cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
123 return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, NULL);
124}
125
126int
127xchk_bnobt(
128 struct xfs_scrub *sc)
129{
130 return xchk_allocbt(sc, XFS_BTNUM_BNO);
131}
132
133int
134xchk_cntbt(
135 struct xfs_scrub *sc)
136{
137 return xchk_allocbt(sc, XFS_BTNUM_CNT);
138}
139
140
141void
142xchk_xref_is_used_space(
143 struct xfs_scrub *sc,
144 xfs_agblock_t agbno,
145 xfs_extlen_t len)
146{
147 bool is_freesp;
148 int error;
149
150 if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
151 return;
152
153 error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
154 if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
155 return;
156 if (is_freesp)
157 xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
158}
159