// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"

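/* Duplicate an existing cursor, pointing at the same btree. */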
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.agno,
			cur->bc_btnum);
}

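/* Point the AGF at a new btree root block and adjust the tree height. */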
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = agbp->b_pag;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

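/* Allocate a new block for the btree from the AG freelist. */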
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

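	/* Take the next block from the freelist; if it is empty, give up. */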
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

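/* Return a freed btree block to the AG freelist. */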
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t	*rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

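/* The bnobt high key is the last block covered by the record. */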
STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

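/* The cntbt sorts by length first, so the length alone forms the high key. */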
STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

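/* Distance between a bnobt key and the cursor's lookup record. */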
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

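/* As above for the cntbt: compare by length first, then by startblock. */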
STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	int64_t			diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	int64_t			diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return  be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

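/* Sanity-check an on-disk free space btree block. */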
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery, so we can't rely
	 * on it here.  If the perag is initialized, check the block's
	 * level against the per-AG btree depth; otherwise fall back to
	 * the maximum possible tree depth recorded in the superblock.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_ag_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name			= "xfs_bnobt",
	.magic			= { cpu_to_be32(XFS_ABTB_MAGIC),
				    cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read		= xfs_allocbt_read_verify,
	.verify_write		= xfs_allocbt_write_verify,
	.verify_struct		= xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name			= "xfs_cntbt",
	.magic			= { cpu_to_be32(XFS_ABTC_MAGIC),
				    cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read		= xfs_allocbt_read_verify,
	.verify_write		= xfs_allocbt_write_verify,
	.verify_struct		= xfs_allocbt_verify,
};

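/* bnobt keys are in order when startblocks strictly increase. */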
STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

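/* bnobt records must not overlap: each extent must end before the next begins. */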
STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

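/* cntbt keys are ordered by (length, startblock). */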
STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

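/* cntbt records follow the same (length, startblock) ordering as the keys. */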
STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/* Common cursor initialization shared by the bnobt and the cntbt. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	cur->bc_ag.agno = agno;
	cur->bc_ag.abt.active = false;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocation btree cursor constructor.
 */
struct xfs_btree_cur *
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}