linux/fs/xfs/scrub/ialloc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
        struct xfs_scrub        *sc,
        struct xfs_inode        *ip)
{
        return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER);
}
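
/*
 * Editor's note (an assumption about the scrub core, not stated in this
 * file): returning -EDEADLOCK from a checker is expected to make the
 * dispatcher set XCHK_TRY_HARDER and call back in, so the force_log
 * argument above becomes true only on the retry pass.
 */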

/* Inode btree scrubber. */

struct xchk_iallocbt {
        /* Number of inodes we see while scanning inobt. */
        unsigned long long      inodes;

        /* Expected next startino, for big block filesystems. */
        xfs_agino_t             next_startino;

        /* Expected end of the current inode cluster. */
        xfs_agino_t             next_cluster_ino;
};
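
/*
 * Illustrative geometry (assumed numbers, for the reader): with 64k fs
 * blocks and 512-byte inodes, one inode cluster holds 128 inodes while an
 * inobt record covers only XFS_INODES_PER_CHUNK (64) of them, so two
 * consecutive records map a single cluster.  next_startino and
 * next_cluster_ino let the scrubber verify that the second record follows
 * immediately after the first.
 */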

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure the
 * finobt has a record for this chunk exactly when the chunk contains free
 * inodes (i.e. freecount > 0).
 */
static inline void
xchk_iallocbt_chunk_xref_other(
        struct xfs_scrub                *sc,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino)
{
        struct xfs_btree_cur            **pcur;
        bool                            has_irec;
        int                             error;

        if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
                pcur = &sc->sa.ino_cur;
        else
                pcur = &sc->sa.fino_cur;
        if (!(*pcur))
                return;
        error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
        if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if ((irec->ir_freecount > 0 && !has_irec) ||
            (irec->ir_freecount == 0 && has_irec))
                xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
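
/*
 * Truth table for the check above (a summary derived from the code):
 *
 *      ir_freecount > 0,  no record in the other btree  => corrupt
 *      ir_freecount == 0, record in the other btree     => corrupt
 *      any other combination                            => consistent
 */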

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
        struct xfs_scrub                *sc,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino,
        xfs_agblock_t                   agbno,
        xfs_extlen_t                    len)
{
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xchk_xref_is_used_space(sc, agbno, len);
        xchk_iallocbt_chunk_xref_other(sc, irec, agino);
        xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
        xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino,
        xfs_extlen_t                    len)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agblock_t                   bno;

        bno = XFS_AGINO_TO_AGBNO(mp, agino);
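        /*
         * The "bno + len <= bno" test below catches unsigned wraparound,
         * i.e. a record so long that its end wraps the agblock space.
         */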
        if (bno + len <= bno ||
            !xfs_verify_agbno(mp, agno, bno) ||
            !xfs_verify_agbno(mp, agno, bno + len - 1))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

        return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
        xfs_inofree_t                   freemask)
{
        BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
        return hweight64(freemask);
}
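
/*
 * Worked example (values assumed for illustration): a chunk with only its
 * lowest four inodes free has freemask = 0xf, so hweight64() returns 4;
 * a completely free chunk has freemask = ~0ULL and a freecount of 64
 * (XFS_INODES_PER_CHUNK).
 */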

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        unsigned int                    irec_ino,
        struct xfs_dinode               *dip)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        xfs_ino_t                       fsino;
        xfs_agino_t                     agino;
        bool                            irec_free;
        bool                            ino_inuse;
        bool                            freemask_ok;
        int                             error = 0;

        if (xchk_should_terminate(bs->sc, &error))
                return error;

        /*
         * Given an inobt record and the offset of an inode from the start of
         * the record, compute which fs inode we're talking about.
         */
        agino = irec->ir_startino + irec_ino;
        fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
        irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

        if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
            (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                goto out;
        }

        error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
                        &ino_inuse);
        if (error == -ENODATA) {
                /* Not cached, just read the disk buffer */
                freemask_ok = irec_free ^ !!(dip->di_mode);
                if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
                        return -EDEADLOCK;
        } else if (error < 0) {
                /*
                 * Inode is only half assembled, or there was an IO error,
                 * or the verifier failed, so don't bother trying to check.
                 * The inode scrubber can deal with this.
                 */
                goto out;
        } else {
                /* Inode is all there. */
                freemask_ok = irec_free ^ ino_inuse;
        }
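        /*
         * The XOR above encodes agreement: exactly one of "the btree says
         * free" and "the inode is in use" should be true; both true or
         * both false means the freemask is wrong.
         */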
        if (!freemask_ok)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
        return 0;
}

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        unsigned int                    cluster_base)
{
        struct xfs_imap                 imap;
        struct xfs_mount                *mp = bs->cur->bc_mp;
        struct xfs_dinode               *dip;
        struct xfs_buf                  *cluster_bp;
        unsigned int                    nr_inodes;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agblock_t                   agbno;
        unsigned int                    cluster_index;
        uint16_t                        cluster_mask = 0;
        uint16_t                        ir_holemask;
        int                             error = 0;

        nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                        mp->m_inodes_per_cluster);

        /* Map this inode cluster */
        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

        /* Compute a bitmask for this cluster that can be used for holemask. */
        for (cluster_index = 0;
             cluster_index < nr_inodes;
             cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
                cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
                                XFS_INODES_PER_HOLEMASK_BIT);
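
        /*
         * Example (assumed geometry): with 16 inodes per cluster and
         * XFS_INODES_PER_HOLEMASK_BIT == 4, the cluster at cluster_base 16
         * sets holemask bits 4-7, i.e. cluster_mask == 0x00f0.
         */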

        /*
         * Map the first inode of this cluster to a buffer and offset.
         * Be careful about inobt records that don't align with the start of
         * the inode buffer when block sizes are large enough to hold multiple
         * inode chunks.  When this happens, cluster_base will be zero but
         * ir_startino can be large enough to make im_boffset nonzero.
         */
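        /*
         * Worked example (numbers assumed): with 64k blocks and 512-byte
         * inodes there are 128 inodes per block, so the second 64-inode
         * chunk in a block has XFS_INO_TO_OFFSET() == 64 and im_boffset
         * becomes 64 << 9 == 32768 bytes into the cluster buffer.
         */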
        ir_holemask = (irec->ir_holemask & cluster_mask);
        imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
        imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
        imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
                        mp->m_sb.sb_inodelog;

        if (imap.im_boffset != 0 && cluster_base != 0) {
                ASSERT(imap.im_boffset == 0 || cluster_base == 0);
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return 0;
        }

        trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
                        imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
                        cluster_mask, ir_holemask,
                        XFS_INO_TO_OFFSET(mp, irec->ir_startino +
                                          cluster_base));

        /* The whole cluster must be a hole or not a hole. */
        if (ir_holemask != cluster_mask && ir_holemask != 0) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return 0;
        }

        /* If any part of this is a hole, skip it. */
        if (ir_holemask) {
                xchk_xref_is_not_owned_by(bs->sc, agbno,
                                mp->m_blocks_per_cluster,
                                &XFS_RMAP_OINFO_INODES);
                return 0;
        }

        xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
                        &XFS_RMAP_OINFO_INODES);

        /* Grab the inode cluster buffer. */
        error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
                        0, 0);
        if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
                return error;

        /* Check free status of each inode within this cluster. */
        for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
                if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                        break;
                }

                dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
                error = xchk_iallocbt_check_cluster_ifree(bs, irec,
                                cluster_base + cluster_index, dip);
                if (error)
                        break;
                imap.im_boffset += mp->m_sb.sb_inodesize;
        }

        xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
        return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec)
{
        unsigned int                    cluster_base;
        int                             error = 0;

        /*
         * For the common case where this inobt record maps to multiple inode
         * clusters, this will call _check_cluster once for each cluster.
         *
         * For the case that multiple inobt records map to a single cluster,
         * this will call _check_cluster once.
         */
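        /*
         * E.g. (assumed geometry): with 16 inodes per cluster this loop
         * makes four _check_cluster calls per 64-inode record; with 128
         * inodes per cluster it makes exactly one.
         */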
        for (cluster_base = 0;
             cluster_base < XFS_INODES_PER_CHUNK;
             cluster_base += bs->sc->mp->m_inodes_per_cluster) {
                error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
                if (error)
                        break;
        }

        return error;
}

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec)
{
        struct xfs_mount                *mp = bs->sc->mp;
        struct xchk_iallocbt            *iabt = bs->private;

        /*
         * finobt records have different positioning requirements than inobt
         * records: each finobt record must have a corresponding inobt record.
         * That is checked in the xref function, so for now we only catch the
         * obvious case where the record isn't at all aligned properly.
         *
         * Note that if a fs block contains more than a single chunk of inodes,
         * we will have finobt records only for those chunks containing free
         * inodes, and therefore expect chunk alignment of finobt records.
         * Otherwise, we expect that the finobt record is aligned to the
         * cluster alignment as told by the superblock.
         */
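        /*
         * Example (assumed value): if m_cluster_align_inodes is 32, imask
         * below is 31 and any finobt record whose ir_startino isn't a
         * multiple of 32 is flagged as corrupt.
         */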
        if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
                unsigned int    imask;

                imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                                mp->m_cluster_align_inodes) - 1;
                if (irec->ir_startino & imask)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (iabt->next_startino != NULLAGINO) {
                /*
                 * We're midway through a cluster of inodes that is mapped by
                 * multiple inobt records.  Did we get the record for the next
                 * irec in the sequence?
                 */
                if (irec->ir_startino != iabt->next_startino) {
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                        return;
                }

                iabt->next_startino += XFS_INODES_PER_CHUNK;

                /* Are we done with the cluster? */
                if (iabt->next_startino >= iabt->next_cluster_ino) {
                        iabt->next_startino = NULLAGINO;
                        iabt->next_cluster_ino = NULLAGINO;
                }
                return;
        }
        /* inobt records must be aligned to cluster and inode alignment size. */
        if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
                return;

        /*
         * If this is the start of an inode cluster that can be mapped by
         * multiple inobt records, the next inobt record must follow exactly
         * after this one.
         */
        iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
        iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
        struct xchk_btree               *bs,
        union xfs_btree_rec             *rec)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        struct xchk_iallocbt            *iabt = bs->private;
        struct xfs_inobt_rec_incore     irec;
        uint64_t                        holes;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agino_t                     agino;
        xfs_extlen_t                    len;
        int                             holecount;
        int                             i;
        int                             error = 0;
        unsigned int                    real_freecount;
        uint16_t                        holemask;

        xfs_inobt_btrec_to_irec(mp, rec, &irec);

        if (irec.ir_count > XFS_INODES_PER_CHUNK ||
            irec.ir_freecount > XFS_INODES_PER_CHUNK)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        real_freecount = irec.ir_freecount +
                        (XFS_INODES_PER_CHUNK - irec.ir_count);
        if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        agino = irec.ir_startino;
        /* Record has to be properly aligned within the AG. */
        if (!xfs_verify_agino(mp, agno, agino) ||
            !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                goto out;
        }

        xchk_iallocbt_rec_alignment(bs, &irec);
        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        iabt->inodes += irec.ir_count;

        /* Handle non-sparse inodes */
        if (!xfs_inobt_issparse(irec.ir_holemask)) {
                len = XFS_B_TO_FSB(mp,
                                XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
                if (irec.ir_count != XFS_INODES_PER_CHUNK)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

                if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
                        goto out;
                goto check_clusters;
        }

        /* Check each chunk of a sparse inode cluster. */
        holemask = irec.ir_holemask;
        holecount = 0;
        len = XFS_B_TO_FSB(mp,
                        XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
        holes = ~xfs_inobt_irec_to_allocmask(&irec);
        if ((holes & irec.ir_free) != holes ||
            irec.ir_freecount > irec.ir_count)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
                if (holemask & 1)
                        holecount += XFS_INODES_PER_HOLEMASK_BIT;
                else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
                        break;
                holemask >>= 1;
                agino += XFS_INODES_PER_HOLEMASK_BIT;
        }
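
        /*
         * Sanity example (assumed record): ir_holemask == 0xff00 marks the
         * upper 32 inodes of the chunk as holes, so holecount is 32 and
         * ir_count must also be 32 for the check below to pass.
         */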

        if (holecount > XFS_INODES_PER_CHUNK ||
            holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
        error = xchk_iallocbt_check_clusters(bs, &irec);
out:
        return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
        struct xfs_scrub        *sc,
        int                     which)
{
        xfs_filblks_t           blocks;
        xfs_extlen_t            inobt_blocks = 0;
        xfs_extlen_t            finobt_blocks = 0;
        int                     error;

        if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
            (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
            xchk_skip_xref(sc->sm))
                return;

        /* Check that we saw as many inobt blocks as the rmap says. */
        error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
        if (!xchk_process_error(sc, 0, 0, &error))
                return;

        if (sc->sa.fino_cur) {
                error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
                if (!xchk_process_error(sc, 0, 0, &error))
                        return;
        }

        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INOBT, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        if (blocks != inobt_blocks + finobt_blocks)
                xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
        struct xfs_scrub        *sc,
        int                     which,
        unsigned long long      inodes)
{
        xfs_filblks_t           blocks;
        xfs_filblks_t           inode_blocks;
        int                     error;

        if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
                return;

        /* Check that we saw as many inode blocks as the rmap knows about. */
        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INODES, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
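        /*
         * E.g. (assumed geometry): 4096-byte blocks and 512-byte inodes
         * give 8 inodes per block, so 640 counted inodes should correspond
         * to 80 rmap-owned inode blocks.
         */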
        if (blocks != inode_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
        struct xfs_scrub        *sc,
        xfs_btnum_t             which)
{
        struct xfs_btree_cur    *cur;
        struct xchk_iallocbt    iabt = {
                .inodes         = 0,
                .next_startino  = NULLAGINO,
                .next_cluster_ino = NULLAGINO,
        };
        int                     error;

        cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
        error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
                        &iabt);
        if (error)
                return error;

        xchk_iallocbt_xref_rmap_btreeblks(sc, which);

        /*
         * If we're scrubbing the inode btree, inode_blocks is the number of
         * blocks pointed to by all the inode chunk records.  Therefore, we
         * should compare to the number of inode chunk blocks that the rmap
         * knows about.  We can't do this for the finobt since it only points
         * to inode chunks with free inodes.
         */
        if (which == XFS_BTNUM_INO)
                xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

        return error;
}

int
xchk_inobt(
        struct xfs_scrub        *sc)
{
        return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
        struct xfs_scrub        *sc)
{
        return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len,
        struct xfs_btree_cur    **icur,
        bool                    should_have_inodes)
{
        bool                    has_inodes;
        int                     error;

        if (!(*icur) || xchk_skip_xref(sc->sm))
                return;

        error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
        if (!xchk_should_check_xref(sc, &error, icur))
                return;
        if (has_inodes != should_have_inodes)
                xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}