linux/fs/xfs/libxfs/xfs_ag.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

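/*
 * Get an uncached buffer for an AG header, zero it, and point it at the
 * given disk address so the per-header init function can fill it in.
 */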
static struct xfs_buf *
xfs_get_aghdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
        if (!bp)
                return NULL;

        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        bp->b_bn = blkno;
        bp->b_maps[0].bm_bn = blkno;
        bp->b_ops = ops;

        return bp;
}

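/* Does this AG contain the internal log? */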
static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
{
        return mp->m_sb.sb_logstart > 0 &&
               id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_alloc_rec    *arec;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);

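        /*
         * The first record covers the free space immediately after the
         * preallocated AG metadata blocks.
         */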
        arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
        arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

        if (is_log_ag(mp, id)) {
                struct xfs_alloc_rec    *nrec;
                xfs_agblock_t           start = XFS_FSB_TO_AGBNO(mp,
                                                        mp->m_sb.sb_logstart);

                ASSERT(start >= mp->m_ag_prealloc_blocks);
                if (start != mp->m_ag_prealloc_blocks) {
                        /*
                         * Modify the first record to pad out to the
                         * stripe-aligned start of the internal log.
                         */
                        arec->ar_blockcount = cpu_to_be32(start -
                                                mp->m_ag_prealloc_blocks);
                        nrec = arec + 1;

                        /*
                         * Insert a second record at the start of the internal
                         * log, which then gets trimmed.
                         */
                        nrec->ar_startblock = cpu_to_be32(
                                        be32_to_cpu(arec->ar_startblock) +
                                        be32_to_cpu(arec->ar_blockcount));
                        arec = nrec;
                        be16_add_cpu(&block->bb_numrecs, 1);
                }
                /*
                 * Change the record start to after the internal log.
                 */
                be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
        }

        /*
         * Calculate the record block count and check for the case where
         * the log might have consumed all available space in the AG. If
         * so, reset the record count to 0 to avoid exposure of an invalid
         * record start block.
         */
        arec->ar_blockcount = cpu_to_be32(id->agsize -
                                          be32_to_cpu(arec->ar_startblock));
        if (!arec->ar_blockcount)
                block->bb_numrecs = 0;
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
        xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
        xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_rmap_rec     *rrec;

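        /*
         * The root block starts with four records: the static AG headers,
         * the free space btree root blocks, the inode btree root blocks,
         * and the rmapbt root itself.  Records for the refcount btree root
         * and the internal log are appended below when present.
         */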
        xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

        /*
         * Mark the AG header regions as static metadata. The BNO
         * btree block is the first block after the headers, so
         * its location defines the size of the region the static
         * metadata consumes.
         *
         * Note: unlike mkfs, we never have to account for log
         * space when growing the data regions.
         */
        rrec = XFS_RMAP_REC_ADDR(block, 1);
        rrec->rm_startblock = 0;
        rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
        rrec->rm_offset = 0;

        /* account freespace btree root blocks */
        rrec = XFS_RMAP_REC_ADDR(block, 2);
        rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(2);
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
        rrec->rm_offset = 0;

        /* account inode btree root blocks */
        rrec = XFS_RMAP_REC_ADDR(block, 3);
        rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
                                          XFS_IBT_BLOCK(mp));
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
        rrec->rm_offset = 0;

        /* account for rmap btree root */
        rrec = XFS_RMAP_REC_ADDR(block, 4);
        rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(1);
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
        rrec->rm_offset = 0;

        /* account for refc btree root */
        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                rrec = XFS_RMAP_REC_ADDR(block, 5);
                rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
                rrec->rm_blockcount = cpu_to_be32(1);
                rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
                rrec->rm_offset = 0;
                be16_add_cpu(&block->bb_numrecs, 1);
        }

        /* account for the log space */
        if (is_log_ag(mp, id)) {
                rrec = XFS_RMAP_REC_ADDR(block,
                                be16_to_cpu(block->bb_numrecs) + 1);
                rrec->rm_startblock = cpu_to_be32(
                                XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
                rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
                rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
                rrec->rm_offset = 0;
                be16_add_cpu(&block->bb_numrecs, 1);
        }
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_dsb          *dsb = XFS_BUF_TO_SBP(bp);

        xfs_sb_to_disk(dsb, &mp->m_sb);
        dsb->sb_inprogress = 1;
}

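/* Set up the free space header (AGF) for a new AG. */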
static void
xfs_agfblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(bp);
        xfs_extlen_t            tmpsize;

        agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
        agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
        agf->agf_seqno = cpu_to_be32(id->agno);
        agf->agf_length = cpu_to_be32(id->agsize);
        agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
        agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
        agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
        agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                agf->agf_roots[XFS_BTNUM_RMAPi] =
                                        cpu_to_be32(XFS_RMAP_BLOCK(mp));
                agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
                agf->agf_rmap_blocks = cpu_to_be32(1);
        }

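        /*
         * The AGFL starts out empty: flcount is zero and flfirst sits one
         * slot past fllast.
         */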
        agf->agf_flfirst = cpu_to_be32(1);
        agf->agf_fllast = 0;
        agf->agf_flcount = 0;
        tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
        agf->agf_freeblks = cpu_to_be32(tmpsize);
        agf->agf_longest = cpu_to_be32(tmpsize);
        if (xfs_sb_version_hascrc(&mp->m_sb))
                uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                agf->agf_refcount_root = cpu_to_be32(
                                xfs_refc_block(mp));
                agf->agf_refcount_level = cpu_to_be32(1);
                agf->agf_refcount_blocks = cpu_to_be32(1);
        }

        if (is_log_ag(mp, id)) {
                int64_t logblocks = mp->m_sb.sb_logblocks;

                be32_add_cpu(&agf->agf_freeblks, -logblocks);
                agf->agf_longest = cpu_to_be32(id->agsize -
                        XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
        }
}

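/* Set up the free list header (AGFL) for a new AG with all slots empty. */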
static void
xfs_agflblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agfl         *agfl = XFS_BUF_TO_AGFL(bp);
        __be32                  *agfl_bno;
        int                     bucket;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
                agfl->agfl_seqno = cpu_to_be32(id->agno);
                uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
        }

        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
        for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
                agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

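/* Set up the inode header (AGI) for a new AG with no inodes allocated yet. */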
static void
xfs_agiblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(bp);
        int                     bucket;

        agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
        agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
        agi->agi_seqno = cpu_to_be32(id->agno);
        agi->agi_length = cpu_to_be32(id->agsize);
        agi->agi_count = 0;
        agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
        agi->agi_level = cpu_to_be32(1);
        agi->agi_freecount = 0;
        agi->agi_newino = cpu_to_be32(NULLAGINO);
        agi->agi_dirino = cpu_to_be32(NULLAGINO);
        if (xfs_sb_version_hascrc(&mp->m_sb))
                uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
        if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
                agi->agi_free_level = cpu_to_be32(1);
        }
        for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
                agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
                                  struct aghdr_init_data *id);
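
/*
 * Get an uncached buffer for the AG header described by @id, run the given
 * init function on it, and queue the result for delayed write on the
 * caller's buffer list.
 */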
static int
xfs_ag_init_hdr(
        struct xfs_mount        *mp,
        struct aghdr_init_data  *id,
        aghdr_init_work_f       work,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
        if (!bp)
                return -ENOMEM;

        (*work)(mp, bp, id);

        xfs_buf_delwri_queue(bp, &id->buffer_list);
        xfs_buf_relse(bp);
        return 0;
}

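/* Describes one AG header buffer to initialise during a grow operation. */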
struct xfs_aghdr_grow_data {
        xfs_daddr_t             daddr;
        size_t                  numblks;
        const struct xfs_buf_ops *ops;
        aghdr_init_work_f       work;
        xfs_btnum_t             type;
        bool                    need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
        struct xfs_mount        *mp,
        struct aghdr_init_data  *id)
{
        struct xfs_aghdr_grow_data aghdr_data[] = {
        { /* SB */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_sb_buf_ops,
                .work = &xfs_sbblock_init,
                .need_init = true
        },
        { /* AGF */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agf_buf_ops,
                .work = &xfs_agfblock_init,
                .need_init = true
        },
        { /* AGFL */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agfl_buf_ops,
                .work = &xfs_agflblock_init,
                .need_init = true
        },
        { /* AGI */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agi_buf_ops,
                .work = &xfs_agiblock_init,
                .need_init = true
        },
        { /* BNO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_bnobt_buf_ops,
                .work = &xfs_bnoroot_init,
                .need_init = true
        },
        { /* CNT root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_cntbt_buf_ops,
                .work = &xfs_cntroot_init,
                .need_init = true
        },
        { /* INO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_inobt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_INO,
                .need_init = true
        },
        { /* FINO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_finobt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_FINO,
                .need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
        },
        { /* RMAP root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_rmapbt_buf_ops,
                .work = &xfs_rmaproot_init,
                .need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
        },
        { /* REFC root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_refcountbt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_REFC,
                .need_init = xfs_sb_version_hasreflink(&mp->m_sb)
        },
        { /* NULL terminating block */
                .daddr = XFS_BUF_DADDR_NULL,
        }
        };
        struct xfs_aghdr_grow_data *dp;
        int                     error = 0;

        /* Account for AG free space in new AG */
        id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
        for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
                if (!dp->need_init)
                        continue;

                id->daddr = dp->daddr;
                id->numblks = dp->numblks;
                id->type = dp->type;
                error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
                if (error)
                        break;
        }
        return error;
}

/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct aghdr_init_data  *id,
        xfs_extlen_t            len)
{
        struct xfs_buf          *bp;
        struct xfs_agi          *agi;
        struct xfs_agf          *agf;
        int                     error;

        /*
         * Change the agi length.
         */
        error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
        if (error)
                return error;

        agi = XFS_BUF_TO_AGI(bp);
        be32_add_cpu(&agi->agi_length, len);
        ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
               be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
        xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

        /*
         * Change agf length.
         */
        error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
        if (error)
                return error;

        agf = XFS_BUF_TO_AGF(bp);
        be32_add_cpu(&agf->agf_length, len);
        ASSERT(agf->agf_length == agi->agi_length);
        xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

        /*
         * Free the new space.
         *
         * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap code that
         * this newly added space does not yet exist in the rmap btree, so
         * there is no owner record to update.
         */
        error = xfs_rmap_free(tp, bp, id->agno,
                                be32_to_cpu(agf->agf_length) - len,
                                len, &XFS_RMAP_OINFO_SKIP_UPDATE);
        if (error)
                return error;

        return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
                                        be32_to_cpu(agf->agf_length) - len),
                                len, &XFS_RMAP_OINFO_SKIP_UPDATE,
                                XFS_AG_RESV_NONE);
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        struct xfs_ag_geometry  *ageo)
{
        struct xfs_buf          *agi_bp;
        struct xfs_buf          *agf_bp;
        struct xfs_agi          *agi;
        struct xfs_agf          *agf;
        struct xfs_perag        *pag;
        unsigned int            freeblks;
        int                     error;

        if (agno >= mp->m_sb.sb_agcount)
                return -EINVAL;

        /* Lock the AG headers. */
        error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
        if (error)
                return error;
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
        if (error)
                goto out_agi;
        pag = xfs_perag_get(mp, agno);

        /* Fill out form. */
        memset(ageo, 0, sizeof(*ageo));
        ageo->ag_number = agno;

        agi = XFS_BUF_TO_AGI(agi_bp);
        ageo->ag_icount = be32_to_cpu(agi->agi_count);
        ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

        agf = XFS_BUF_TO_AGF(agf_bp);
        ageo->ag_length = be32_to_cpu(agf->agf_length);
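        /*
         * Usable free space: free extents, AGFL blocks, and blocks held in
         * the AGF btrees, less anything set aside for per-AG metadata
         * reservations.
         */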
        freeblks = pag->pagf_freeblks +
                   pag->pagf_flcount +
                   pag->pagf_btreeblks -
                   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
        ageo->ag_freeblks = freeblks;
        xfs_ag_geom_health(pag, ageo);

        /* Release resources. */
        xfs_perag_put(pag);
        xfs_buf_relse(agf_bp);
out_agi:
        xfs_buf_relse(agi_bp);
        return error;
}