// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped.  Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There is only one owner of a
 * metadata extent, usually an inode or some other metadata structure like an
 * AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/sizing of the reserve pool because the rmapbt
 * doesn't track un-used space in the AG.
 */

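/* Allocate a new cursor pointing at the same rmap btree as @cur. */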
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag);
}

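/* Install a new btree root and level in the AGF and log the change. */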
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

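/* Allocate a new rmap btree block from the AG free list. */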
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);

	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);

	*stat = 1;
	return 0;
}

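/* Return an rmap btree block to the AG free list. */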
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);

	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
	return 0;
}

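/* Return the minimum number of records per block for the given level. */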
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

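/* Return the maximum number of records per block for the given level. */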
STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}

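/* Compute the low key of an rmap record: the record's own start values. */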
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}

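/* Convert the cursor's in-core record to on-disk format. */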
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

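/* Point @ptr at the root block recorded in the AGF. */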
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

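/*
 * Compare @key against the cursor's in-core record, returning the sign of
 * (key - record).  Keys sort by startblock, then owner, then offset; the
 * offset comparison uses XFS_RMAP_OFF to mask off the extent flag bits.
 */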
STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	int64_t			d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

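/* Compare two keys, returning the sign of (k1 - k2) in [startblock, owner, offset] order. */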
STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t			d;
	__u64			x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		     be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

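/* Sanity-check an on-disk rmap btree block. */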
static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

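/* Verify the checksum and structure of an rmap btree block after reading it. */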
static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

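/* Check an rmap btree block and compute its checksum before it is written. */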
static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};

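/* Return 1 if (k1 <= k2) in [startblock, owner, offset] order, else 0. */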
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

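/* Return 1 if (r1 <= r2) in [startblock, owner, offset] order, else 0. */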
STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
};

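/* Set up the parts of an rmap cursor shared by normal and staging cursors. */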
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ops = &xfs_rmapbt_ops;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	return cur;
}

/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new reverse mapping btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	/*
	 * On a non-reflink filesystem, the maximum number of rmap
	 * records is the number of blocks in the AG, hence the max
	 * rmapbt height is log_2(agblocks).  For a reflink filesystem,
	 * however, each AG block can be shared by an arbitrary number
	 * of owners, so the number of rmap records is not bounded by
	 * the AG size.  In the worst case the tree can grow to the
	 * largest height the btree code supports, so use the global
	 * maximum for reflink filesystems.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
	else
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
		agblocks -= mp->m_sb.sb_logblocks;

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}