linux/fs/xfs/libxfs/xfs_ag.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

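/*
 * Grab an uncached buffer for a new AG header, zero it, and set up its disk
 * address and verifier ops. Returns NULL if a buffer could not be obtained.
 */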
static struct xfs_buf *
xfs_get_aghdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
        int                     flags,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
        if (!bp)
                return NULL;

        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        bp->b_bn = blkno;
        bp->b_maps[0].bm_bn = blkno;
        bp->b_ops = ops;

        return bp;
}

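/* Does this AG contain the filesystem's internal log? */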
static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
{
        return mp->m_sb.sb_logstart > 0 &&
               id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_alloc_rec    *arec;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);

        arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
        arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

        if (is_log_ag(mp, id)) {
                struct xfs_alloc_rec    *nrec;
                xfs_agblock_t           start = XFS_FSB_TO_AGBNO(mp,
                                                        mp->m_sb.sb_logstart);

                ASSERT(start >= mp->m_ag_prealloc_blocks);
                if (start != mp->m_ag_prealloc_blocks) {
                        /*
                         * Modify the first record so that it covers the
                         * stripe-alignment padding before the internal log.
                         */
                        arec->ar_blockcount = cpu_to_be32(start -
                                                mp->m_ag_prealloc_blocks);
                        nrec = arec + 1;

                        /*
                         * Insert second record at start of internal log
                         * which then gets trimmed.
                         */
                        nrec->ar_startblock = cpu_to_be32(
                                        be32_to_cpu(arec->ar_startblock) +
                                        be32_to_cpu(arec->ar_blockcount));
                        arec = nrec;
                        be16_add_cpu(&block->bb_numrecs, 1);
                }
                /*
                 * Change record start to after the internal log
                 */
                be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
        }

        /*
         * Calculate the record block count and check for the case where
         * the log might have consumed all available space in the AG. If
         * so, reset the record count to 0 to avoid exposure of an invalid
         * record start block.
         */
        arec->ar_blockcount = cpu_to_be32(id->agsize -
                                          be32_to_cpu(arec->ar_startblock));
        if (!arec->ar_blockcount)
                block->bb_numrecs = 0;
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
        xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
        xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_rmap_rec     *rrec;

        xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

        /*
         * Mark the AG header regions as static metadata. The BNO
         * btree block is the first block after the headers, so its
         * location defines the size of the region the static
         * metadata consumes.
         *
         * Note: unlike mkfs, we never have to account for log
         * space when growing the data regions.
         */
        rrec = XFS_RMAP_REC_ADDR(block, 1);
        rrec->rm_startblock = 0;
        rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
        rrec->rm_offset = 0;

        /* account freespace btree root blocks */
        rrec = XFS_RMAP_REC_ADDR(block, 2);
        rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(2);
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
        rrec->rm_offset = 0;

        /* account inode btree root blocks */
        rrec = XFS_RMAP_REC_ADDR(block, 3);
        rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
                                          XFS_IBT_BLOCK(mp));
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
        rrec->rm_offset = 0;

        /* account for rmap btree root */
        rrec = XFS_RMAP_REC_ADDR(block, 4);
        rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
        rrec->rm_blockcount = cpu_to_be32(1);
        rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
        rrec->rm_offset = 0;

        /* account for refc btree root */
        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                rrec = XFS_RMAP_REC_ADDR(block, 5);
                rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
                rrec->rm_blockcount = cpu_to_be32(1);
                rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
                rrec->rm_offset = 0;
                be16_add_cpu(&block->bb_numrecs, 1);
        }

        /* account for the log space */
        if (is_log_ag(mp, id)) {
                rrec = XFS_RMAP_REC_ADDR(block,
                                be16_to_cpu(block->bb_numrecs) + 1);
                rrec->rm_startblock = cpu_to_be32(
                                XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
                rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
                rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
                rrec->rm_offset = 0;
                be16_add_cpu(&block->bb_numrecs, 1);
        }
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_dsb          *dsb = XFS_BUF_TO_SBP(bp);

        xfs_sb_to_disk(dsb, &mp->m_sb);
        dsb->sb_inprogress = 1;
}

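/*
 * Initialise the AGF for a new AG: free space btree roots and levels, an
 * empty free list, and the free space counters, accounting for the internal
 * log if it lives in this AG.
 */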
static void
xfs_agfblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(bp);
        xfs_extlen_t            tmpsize;

        agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
        agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
        agf->agf_seqno = cpu_to_be32(id->agno);
        agf->agf_length = cpu_to_be32(id->agsize);
        agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
        agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
        agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
        agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                agf->agf_roots[XFS_BTNUM_RMAPi] =
                                        cpu_to_be32(XFS_RMAP_BLOCK(mp));
                agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
                agf->agf_rmap_blocks = cpu_to_be32(1);
        }

        agf->agf_flfirst = cpu_to_be32(1);
        agf->agf_fllast = 0;
        agf->agf_flcount = 0;
        tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
        agf->agf_freeblks = cpu_to_be32(tmpsize);
        agf->agf_longest = cpu_to_be32(tmpsize);
        if (xfs_sb_version_hascrc(&mp->m_sb))
                uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                agf->agf_refcount_root = cpu_to_be32(
                                xfs_refc_block(mp));
                agf->agf_refcount_level = cpu_to_be32(1);
                agf->agf_refcount_blocks = cpu_to_be32(1);
        }

        if (is_log_ag(mp, id)) {
                int64_t logblocks = mp->m_sb.sb_logblocks;

                be32_add_cpu(&agf->agf_freeblks, -logblocks);
                agf->agf_longest = cpu_to_be32(id->agsize -
                        XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
        }
}

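/*
 * Initialise the AGFL for a new AG. Every slot is set to NULLAGBLOCK because
 * the free list starts out empty.
 */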
static void
xfs_agflblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agfl         *agfl = XFS_BUF_TO_AGFL(bp);
        __be32                  *agfl_bno;
        int                     bucket;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
                agfl->agfl_seqno = cpu_to_be32(id->agno);
                uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
        }

        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
        for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
                agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

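/*
 * Initialise the AGI for a new AG: inode btree root(s), zeroed inode
 * counters and empty unlinked list buckets.
 */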
static void
xfs_agiblock_init(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        struct aghdr_init_data  *id)
{
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(bp);
        int                     bucket;

        agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
        agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
        agi->agi_seqno = cpu_to_be32(id->agno);
        agi->agi_length = cpu_to_be32(id->agsize);
        agi->agi_count = 0;
        agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
        agi->agi_level = cpu_to_be32(1);
        agi->agi_freecount = 0;
        agi->agi_newino = cpu_to_be32(NULLAGINO);
        agi->agi_dirino = cpu_to_be32(NULLAGINO);
        if (xfs_sb_version_hascrc(&mp->m_sb))
                uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
        if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
                agi->agi_free_level = cpu_to_be32(1);
        }
        for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
                agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
                                  struct aghdr_init_data *id);
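/*
 * Get an uncached buffer for one AG header, run the type-specific init
 * function on it and queue it on the delayed write list in @id for the
 * caller to submit.
 */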
static int
xfs_ag_init_hdr(
        struct xfs_mount        *mp,
        struct aghdr_init_data  *id,
        aghdr_init_work_f       work,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
        if (!bp)
                return -ENOMEM;

        (*work)(mp, bp, id);

        xfs_buf_delwri_queue(bp, &id->buffer_list);
        xfs_buf_relse(bp);
        return 0;
}

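/*
 * Table entry describing one AG header to initialise: where it lives on
 * disk, how big it is, which verifier ops and init function to use, the
 * btree type (where relevant) and whether this filesystem needs it at all.
 */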
struct xfs_aghdr_grow_data {
        xfs_daddr_t             daddr;
        size_t                  numblks;
        const struct xfs_buf_ops *ops;
        aghdr_init_work_f       work;
        xfs_btnum_t             type;
        bool                    need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
        struct xfs_mount        *mp,
        struct aghdr_init_data  *id)
{
        struct xfs_aghdr_grow_data aghdr_data[] = {
        { /* SB */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_sb_buf_ops,
                .work = &xfs_sbblock_init,
                .need_init = true
        },
        { /* AGF */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agf_buf_ops,
                .work = &xfs_agfblock_init,
                .need_init = true
        },
        { /* AGFL */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agfl_buf_ops,
                .work = &xfs_agflblock_init,
                .need_init = true
        },
        { /* AGI */
                .daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
                .numblks = XFS_FSS_TO_BB(mp, 1),
                .ops = &xfs_agi_buf_ops,
                .work = &xfs_agiblock_init,
                .need_init = true
        },
        { /* BNO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_bnobt_buf_ops,
                .work = &xfs_bnoroot_init,
                .need_init = true
        },
        { /* CNT root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_cntbt_buf_ops,
                .work = &xfs_cntroot_init,
                .need_init = true
        },
        { /* INO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_inobt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_INO,
                .need_init = true
        },
        { /* FINO root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_finobt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_FINO,
                .need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
        },
        { /* RMAP root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_rmapbt_buf_ops,
                .work = &xfs_rmaproot_init,
                .need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
        },
        { /* REFC root block */
                .daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
                .numblks = BTOBB(mp->m_sb.sb_blocksize),
                .ops = &xfs_refcountbt_buf_ops,
                .work = &xfs_btroot_init,
                .type = XFS_BTNUM_REFC,
                .need_init = xfs_sb_version_hasreflink(&mp->m_sb)
        },
        { /* NULL terminating block */
                .daddr = XFS_BUF_DADDR_NULL,
        }
        };
        struct xfs_aghdr_grow_data *dp;
        int                     error = 0;

        /* Account for AG free space in new AG */
        id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
        for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
                if (!dp->need_init)
                        continue;

                id->daddr = dp->daddr;
                id->numblks = dp->numblks;
                id->type = dp->type;
                error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
                if (error)
                        break;
        }
        return error;
}

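/*
 * A rough usage sketch of how a caller is expected to drive this function
 * (see xfs_fs_growfs_data_private() in xfs_fsops.c for the real loop; agno,
 * agsize and error below are placeholder locals):
 *
 *      struct aghdr_init_data  id = {};
 *
 *      INIT_LIST_HEAD(&id.buffer_list);
 *      id.agno = agno;
 *      id.agsize = agsize;
 *      error = xfs_ag_init_headers(mp, &id);
 *      if (!error)
 *              error = xfs_buf_delwri_submit(&id.buffer_list);
 */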
/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct aghdr_init_data  *id,
        xfs_extlen_t            len)
{
        struct xfs_buf          *bp;
        struct xfs_agi          *agi;
        struct xfs_agf          *agf;
        int                     error;

        /*
         * Change the agi length.
         */
        error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
        if (error)
                return error;

        agi = XFS_BUF_TO_AGI(bp);
        be32_add_cpu(&agi->agi_length, len);
        ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
               be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
        xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

        /*
         * Change agf length.
         */
        error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
        if (error)
                return error;

        agf = XFS_BUF_TO_AGF(bp);
        be32_add_cpu(&agf->agf_length, len);
        ASSERT(agf->agf_length == agi->agi_length);
        xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

        /*
         * Free the new space.
         *
         * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
         * this doesn't actually exist in the rmap btree.
         */
        error = xfs_rmap_free(tp, bp, id->agno,
                                be32_to_cpu(agf->agf_length) - len,
                                len, &XFS_RMAP_OINFO_SKIP_UPDATE);
        if (error)
                return error;

        return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
                                        be32_to_cpu(agf->agf_length) - len),
                                len, &XFS_RMAP_OINFO_SKIP_UPDATE,
                                XFS_AG_RESV_NONE);
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        struct xfs_ag_geometry  *ageo)
{
        struct xfs_buf          *agi_bp;
        struct xfs_buf          *agf_bp;
        struct xfs_agi          *agi;
        struct xfs_agf          *agf;
        struct xfs_perag        *pag;
        unsigned int            freeblks;
        int                     error;

        if (agno >= mp->m_sb.sb_agcount)
                return -EINVAL;

        /* Lock the AG headers. */
        error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
        if (error)
                return error;
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
        if (error)
                goto out_agi;
        pag = xfs_perag_get(mp, agno);

        /* Fill out form. */
        memset(ageo, 0, sizeof(*ageo));
        ageo->ag_number = agno;

        agi = XFS_BUF_TO_AGI(agi_bp);
        ageo->ag_icount = be32_to_cpu(agi->agi_count);
        ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

        agf = XFS_BUF_TO_AGF(agf_bp);
        ageo->ag_length = be32_to_cpu(agf->agf_length);
        freeblks = pag->pagf_freeblks +
                   pag->pagf_flcount +
                   pag->pagf_btreeblks -
                   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
        ageo->ag_freeblks = freeblks;
        xfs_ag_geom_health(pag, ageo);

        /* Release resources. */
        xfs_perag_put(pag);
        xfs_buf_relse(agf_bp);
out_agi:
        xfs_buf_relse(agi_bp);
        return error;
}
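/*
 * Usage note: xfs_ag_get_geometry() backs the XFS_IOC_AG_GEOMETRY ioctl. A
 * rough, simplified sketch of that caller (see xfs_ioc_ag_geometry() in
 * xfs_ioctl.c for the real implementation, which also validates the
 * reserved/flags fields):
 *
 *      struct xfs_ag_geometry  ageo;
 *
 *      if (copy_from_user(&ageo, arg, sizeof(ageo)))
 *              return -EFAULT;
 *      error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
 *      if (error)
 *              return error;
 *      if (copy_to_user(arg, &ageo, sizeof(ageo)))
 *              return -EFAULT;
 *      return 0;
 */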