6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_btree.h"
15#include "xfs_bmap.h"
16#include "xfs_refcount_btree.h"
17#include "xfs_alloc.h"
18#include "xfs_error.h"
19#include "xfs_trace.h"
20#include "xfs_cksum.h"
21#include "xfs_trans.h"
22#include "xfs_bit.h"
23#include "xfs_rmap.h"
24
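/* Duplicate an existing refcount btree cursor for the same AG and transaction. */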
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}

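/*
 * Point the AGF at a new refcount btree root block, adjust the recorded tree
 * height to match, and log the change.
 */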
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

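/*
 * Allocate a single block for the refcount btree, accounted against the
 * per-AG metadata reservation, and track it in the AGF block counter.
 */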
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.oinfo = XFS_RMAP_OINFO_REFC;
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

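/* Free a refcount btree block back to the per-AG metadata reservation. */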
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	int			error;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
			XFS_AG_RESV_METADATA);
	return error;
}

STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}

STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			be32_to_cpu(k2->refc.rc_startblock);
}

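/*
 * Check that a refcount btree block has a sane magic number, header, level,
 * and record count before letting it anywhere near the rest of the code.
 */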
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return __this_address;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

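/*
 * Allocate a new refcount btree cursor.
 */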
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}

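/*
 * Calculate the number of records that fit in a refcount btree block.
 */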
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

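/* Compute the maximum height of a refcount btree. */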
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

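/* Calculate the refcount btree size needed to hold the given number of records. */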
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

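/*
 * Calculate the maximum refcount btree size for an AG.
 */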
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
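	/* Bail out if the per-block record limits are uninitialized, as during mkfs. */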
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

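/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */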
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_trans_brelse(tp, agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}