linux/fs/xfs/libxfs/xfs_alloc_btree.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache        *xfs_allocbt_cur_cache;

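/* Duplicate an active cursor, pointing at the same AG, tree and AGF buffer. */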
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

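/*
 * Point the AGF at a new root block and adjust the recorded tree height,
 * keeping the incore perag copy of the level in sync with the on-disk AGF.
 */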
STATIC void
xfs_allocbt_set_root(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_ptr       *ptr,
        int                             inc)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        struct xfs_agf          *agf = agbp->b_addr;
        int                     btnum = cur->bc_btnum;

        ASSERT(ptr->s != 0);

        agf->agf_roots[btnum] = ptr->s;
        be32_add_cpu(&agf->agf_levels[btnum], inc);
        cur->bc_ag.pag->pagf_levels[btnum] += inc;

        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

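/*
 * Allocate a block for the btree from the AG's free list.  Freelist blocks
 * may still be marked busy from an earlier free, so clear the busy state
 * before handing the block back to the generic btree code.
 */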
STATIC int
xfs_allocbt_alloc_block(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_ptr       *start,
        union xfs_btree_ptr             *new,
        int                             *stat)
{
        int                     error;
        xfs_agblock_t           bno;

        /* Allocate the new block from the freelist. If we can't, give up.  */
        error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
                                       &bno, 1);
        if (error)
                return error;

        if (bno == NULLAGBLOCK) {
                *stat = 0;
                return 0;
        }

        atomic64_inc(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);

        new->s = cpu_to_be32(bno);

        *stat = 1;
        return 0;
}

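/*
 * Return a freed btree block to the AG free list, marking the extent busy
 * so it is not reused before the free commits to the log.
 */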
STATIC int
xfs_allocbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        xfs_agblock_t           bno;
        int                     error;

        bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
        error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
        if (error)
                return error;

        atomic64_dec(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        return 0;
}

/*
 * Update the longest extent in the AGF.  The cntbt is sorted by extent
 * length, so the rightmost record of the rightmost leaf is by definition
 * the longest free extent in the AG; mirror it into agf_longest and the
 * perag so allocators can check it without walking the tree.
 */
STATIC void
xfs_allocbt_update_lastrec(
        struct xfs_btree_cur            *cur,
        const struct xfs_btree_block    *block,
        const union xfs_btree_rec       *rec,
        int                             ptr,
        int                             reason)
{
        struct xfs_agf          *agf = cur->bc_ag.agbp->b_addr;
        struct xfs_perag        *pag;
        __be32                  len;
        int                     numrecs;

        ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

        switch (reason) {
        case LASTREC_UPDATE:
                /*
                 * If this is the last leaf block and it's the last record,
                 * then update the size of the longest extent in the AG.
                 */
                if (ptr != xfs_btree_get_numrecs(block))
                        return;
                len = rec->alloc.ar_blockcount;
                break;
        case LASTREC_INSREC:
                if (be32_to_cpu(rec->alloc.ar_blockcount) <=
                    be32_to_cpu(agf->agf_longest))
                        return;
                len = rec->alloc.ar_blockcount;
                break;
        case LASTREC_DELREC:
                numrecs = xfs_btree_get_numrecs(block);
                if (ptr <= numrecs)
                        return;
                ASSERT(ptr == numrecs + 1);

                if (numrecs) {
                        xfs_alloc_rec_t *rrp;

                        rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
                        len = rrp->ar_blockcount;
                } else {
                        len = 0;
                }

                break;
        default:
                ASSERT(0);
                return;
        }

        agf->agf_longest = len;
        pag = cur->bc_ag.agbp->b_pag;
        pag->pagf_longest = be32_to_cpu(len);
        xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

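/*
 * Minimum/maximum records per block, precomputed at mount time; index 0
 * covers leaf blocks, index 1 covers interior node blocks.
 */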
STATIC int
xfs_allocbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        key->alloc.ar_startblock = rec->alloc.ar_startblock;
        key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

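/*
 * The high key of a bnobt record is the last block the record covers:
 * startblock + blockcount - 1.
 */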
STATIC void
xfs_bnobt_init_high_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        __u32                           x;

        x = be32_to_cpu(rec->alloc.ar_startblock);
        x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
        key->alloc.ar_startblock = cpu_to_be32(x);
        key->alloc.ar_blockcount = 0;
}

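/*
 * The high key of a cntbt record is its own length; the startblock
 * tiebreaker is zeroed rather than tracked.
 */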
STATIC void
xfs_cntbt_init_high_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
        key->alloc.ar_startblock = 0;
}

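/* Write the cursor's incore lookup record out in on-disk format. */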
STATIC void
xfs_allocbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
        rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

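/* Start a tree walk at the root block recorded in the AGF. */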
STATIC void
xfs_allocbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agf          *agf = cur->bc_ag.agbp->b_addr;

        ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_roots[cur->bc_btnum];
}

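/*
 * Compare the cursor's lookup record against a bnobt key.  The bnobt is
 * keyed by starting block number alone.
 */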
STATIC int64_t
xfs_bnobt_key_diff(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *key)
{
        struct xfs_alloc_rec_incore     *rec = &cur->bc_rec.a;
        const struct xfs_alloc_rec      *kp = &key->alloc;

        return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

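/*
 * Compare the cursor's lookup record against a cntbt key.  The cntbt is
 * keyed by extent length, with startblock breaking ties.
 */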
STATIC int64_t
xfs_cntbt_key_diff(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *key)
{
        struct xfs_alloc_rec_incore     *rec = &cur->bc_rec.a;
        const struct xfs_alloc_rec      *kp = &key->alloc;
        int64_t                         diff;

        diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
        if (diff)
                return diff;

        return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

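/* Compare two bnobt keys for ordering, by startblock. */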
STATIC int64_t
xfs_bnobt_diff_two_keys(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
                          be32_to_cpu(k2->alloc.ar_startblock);
}

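/* Compare two cntbt keys for ordering: length first, then startblock. */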
STATIC int64_t
xfs_cntbt_diff_two_keys(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        int64_t                         diff;

        diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
                be32_to_cpu(k2->alloc.ar_blockcount);
        if (diff)
                return diff;

        return  be32_to_cpu(k1->alloc.ar_startblock) -
                be32_to_cpu(k2->alloc.ar_startblock);
}

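/*
 * Structural checks shared by both free space btrees: magic number, v5
 * header fields, tree level against the recorded bounds, and the generic
 * short-format block verifier.
 */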
static xfs_failaddr_t
xfs_allocbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag        *pag = bp->b_pag;
        xfs_failaddr_t          fa;
        unsigned int            level;
        xfs_btnum_t             btnum = XFS_BTNUM_BNOi;

        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (xfs_has_crc(mp)) {
                fa = xfs_btree_sblock_v5hdr_verify(bp);
                if (fa)
                        return fa;
        }

        /*
         * The perag may not be attached during grow operations or fully
         * initialized from the AGF during log recovery. Therefore we can only
         * check against maximum tree depth from those contexts.
         *
         * Otherwise check against the per-tree limit. Peek at one of the
         * verifier magic values to determine the type of tree we're verifying
         * against.
         */
        level = be16_to_cpu(block->bb_level);
        if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
                btnum = XFS_BTNUM_CNTi;
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_levels[btnum])
                        return __this_address;
        } else if (level >= mp->m_alloc_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

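/* Verify a block as it is read in: check the CRC first, then structure. */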
static void
xfs_allocbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_allocbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

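/* Re-verify structure before write-out, then stamp the CRC. */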
static void
xfs_allocbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_allocbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
        .name = "xfs_bnobt",
        .magic = { cpu_to_be32(XFS_ABTB_MAGIC),
                   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
        .verify_read = xfs_allocbt_read_verify,
        .verify_write = xfs_allocbt_write_verify,
        .verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
        .name = "xfs_cntbt",
        .magic = { cpu_to_be32(XFS_ABTC_MAGIC),
                   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
        .verify_read = xfs_allocbt_read_verify,
        .verify_write = xfs_allocbt_write_verify,
        .verify_struct = xfs_allocbt_verify,
};

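/*
 * Key/record ordering checks, called from the generic btree code's debug
 * checking to assert that adjacent entries sort correctly for each tree.
 */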
STATIC int
xfs_bnobt_keys_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return be32_to_cpu(k1->alloc.ar_startblock) <
               be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_rec       *r1,
        const union xfs_btree_rec       *r2)
{
        return be32_to_cpu(r1->alloc.ar_startblock) +
                be32_to_cpu(r1->alloc.ar_blockcount) <=
                be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return be32_to_cpu(k1->alloc.ar_blockcount) <
                be32_to_cpu(k2->alloc.ar_blockcount) ||
                (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
                 be32_to_cpu(k1->alloc.ar_startblock) <
                 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_rec       *r1,
        const union xfs_btree_rec       *r2)
{
        return be32_to_cpu(r1->alloc.ar_blockcount) <
                be32_to_cpu(r2->alloc.ar_blockcount) ||
                (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
                 be32_to_cpu(r1->alloc.ar_startblock) <
                 be32_to_cpu(r2->alloc.ar_startblock));
}

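/* Operations vector for the by-block-number (bnobt) free space btree. */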
static const struct xfs_btree_ops xfs_bnobt_ops = {
        .rec_len                = sizeof(xfs_alloc_rec_t),
        .key_len                = sizeof(xfs_alloc_key_t),

        .dup_cursor             = xfs_allocbt_dup_cursor,
        .set_root               = xfs_allocbt_set_root,
        .alloc_block            = xfs_allocbt_alloc_block,
        .free_block             = xfs_allocbt_free_block,
        .update_lastrec         = xfs_allocbt_update_lastrec,
        .get_minrecs            = xfs_allocbt_get_minrecs,
        .get_maxrecs            = xfs_allocbt_get_maxrecs,
        .init_key_from_rec      = xfs_allocbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_allocbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_allocbt_init_ptr_from_cur,
        .key_diff               = xfs_bnobt_key_diff,
        .buf_ops                = &xfs_bnobt_buf_ops,
        .diff_two_keys          = xfs_bnobt_diff_two_keys,
        .keys_inorder           = xfs_bnobt_keys_inorder,
        .recs_inorder           = xfs_bnobt_recs_inorder,
};

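/* Operations vector for the by-size (cntbt) free space btree. */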
static const struct xfs_btree_ops xfs_cntbt_ops = {
        .rec_len                = sizeof(xfs_alloc_rec_t),
        .key_len                = sizeof(xfs_alloc_key_t),

        .dup_cursor             = xfs_allocbt_dup_cursor,
        .set_root               = xfs_allocbt_set_root,
        .alloc_block            = xfs_allocbt_alloc_block,
        .free_block             = xfs_allocbt_free_block,
        .update_lastrec         = xfs_allocbt_update_lastrec,
        .get_minrecs            = xfs_allocbt_get_minrecs,
        .get_maxrecs            = xfs_allocbt_get_maxrecs,
        .init_key_from_rec      = xfs_allocbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_allocbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_allocbt_init_ptr_from_cur,
        .key_diff               = xfs_cntbt_key_diff,
        .buf_ops                = &xfs_cntbt_buf_ops,
        .diff_two_keys          = xfs_cntbt_diff_two_keys,
        .keys_inorder           = xfs_cntbt_keys_inorder,
        .recs_inorder           = xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)
{
        struct xfs_btree_cur    *cur;

        ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

        cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
                        xfs_allocbt_cur_cache);
        cur->bc_ag.abt.active = false;

        if (btnum == XFS_BTNUM_CNT) {
                cur->bc_ops = &xfs_cntbt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
                cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
        } else {
                cur->bc_ops = &xfs_bnobt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
        }

        /* take a reference for the cursor */
        atomic_inc(&pag->pag_ref);
        cur->bc_ag.pag = pag;

        if (xfs_has_crc(mp))
                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *                  /* new alloc btree cursor */
xfs_allocbt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_buf          *agbp,          /* buffer for agf structure */
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)          /* btree identifier */
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xfs_btree_cur    *cur;

        cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
        if (btnum == XFS_BTNUM_CNT)
                cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
        else
                cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

        cur->bc_ag.agbp = agbp;

        return cur;
}
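
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * a caller that already holds the AGF buffer builds a cursor, performs a
 * lookup, and tears the cursor down again:
 *
 *        cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *        error = xfs_alloc_lookup_ge(cur, bno, len, &stat);
 *        ...
 *        xfs_btree_del_cursor(cur, error);
 */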

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
        struct xfs_mount        *mp,
        struct xbtree_afakeroot *afake,
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)
{
        struct xfs_btree_cur    *cur;

        cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
        xfs_btree_stage_afakeroot(cur, afake);
        return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
        struct xfs_btree_cur    *cur,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp)
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xbtree_afakeroot *afake = cur->bc_ag.afake;

        ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

        agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
        agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
        xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

        if (cur->bc_btnum == XFS_BTNUM_BNO) {
                xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
        } else {
                cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
                xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
        }
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
        unsigned int            blocklen,
        bool                    leaf)
{
        if (leaf)
                return blocklen / sizeof(xfs_alloc_rec_t);
        return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
        return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

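/*
 * Worked example (an illustration, assuming the common 4096-byte block
 * size with CRCs enabled): XFS_ALLOC_BLOCK_LEN() is 56 bytes, leaving
 * 4040 bytes of payload.  Records and keys are 8 bytes each and pointers
 * are 4, so a leaf holds 4040 / 8 = 505 records and a node holds
 * 4040 / 12 = 336 key/pointer pairs.
 */
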
/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS  ((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
        unsigned int            minrecs[2];
        unsigned int            blocklen;

        blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
                       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

        minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
        minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

        return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

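/* Set up the single cursor cache shared by both free space btree types. */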
int __init
xfs_allocbt_init_cur_cache(void)
{
        xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
                        xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
                        0, 0, NULL);

        if (!xfs_allocbt_cur_cache)
                return -ENOMEM;
        return 0;
}

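/* Tear down the cursor cache at module unload. */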
void
xfs_allocbt_destroy_cur_cache(void)
{
        kmem_cache_destroy(xfs_allocbt_cur_cache);
        xfs_allocbt_cur_cache = NULL;
}