linux/fs/xfs/scrub/bmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
        struct xfs_scrub        *sc,
        struct xfs_inode        *ip)
{
        int                     error;

        error = xchk_get_inode(sc, ip);
        if (error)
                goto out;

        sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_ilock(sc->ip, sc->ilock_flags);

        /*
         * We don't want any ephemeral data fork updates sitting around
         * while we inspect block mappings, so wait for directio to finish
         * and flush dirty data if we have delalloc reservations.
         */
        if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
            sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
                struct address_space    *mapping = VFS_I(sc->ip)->i_mapping;

                inode_dio_wait(VFS_I(sc->ip));

                /*
                 * Try to flush all incore state to disk before we examine the
                 * space mappings for the data fork.  Leave accumulated errors
                 * in the mapping for the writer threads to consume.
                 *
                 * On ENOSPC or EIO writeback errors, we continue into the
                 * extent mapping checks because write failures do not
                 * necessarily imply anything about the correctness of the file
                 * metadata.  The metadata and the file data could be on
                 * completely separate devices; a media failure might only
                 * affect a subset of the disk, etc.  We can handle delalloc
                 * extents in the scrubber, so leaving them in memory is fine.
                 */
                error = filemap_fdatawrite(mapping);
                if (!error)
                        error = filemap_fdatawait_keep_errors(mapping);
                if (error && (error != -ENOSPC && error != -EIO))
                        goto out;
        }

        /* Got the inode, lock it and we're ready to go. */
        error = xchk_trans_alloc(sc, 0);
        if (error)
                goto out;
        sc->ilock_flags |= XFS_ILOCK_EXCL;
        xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
        /* scrub teardown will unlock and release the inode */
        return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
        struct xfs_scrub        *sc;
        xfs_fileoff_t           lastoff;
        bool                    is_rt;
        bool                    is_shared;
        bool                    was_loaded;
        int                     whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec,
        xfs_agblock_t           agbno,
        uint64_t                owner,
        struct xfs_rmap_irec    *rmap)
{
        xfs_fileoff_t           offset;
        unsigned int            rflags = 0;
        int                     has_rmap;
        int                     error;

        if (info->whichfork == XFS_ATTR_FORK)
                rflags |= XFS_RMAP_ATTR_FORK;

        /*
         * CoW staging extents are owned (on disk) by the refcountbt, so
         * their rmaps do not have offsets.
         */
        if (info->whichfork == XFS_COW_FORK)
                offset = 0;
        else
                offset = irec->br_startoff;

        /*
         * If the caller thinks this could be a shared bmbt extent (IOWs,
         * any data fork extent of a reflink inode) then we have to use the
         * range rmap lookup to make sure we get the correct owner/offset.
         */
        if (info->is_shared) {
                error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
                                owner, offset, rflags, rmap, &has_rmap);
                if (!xchk_should_check_xref(info->sc, &error,
                                &info->sc->sa.rmap_cur))
                        return false;
                goto out;
        }

        /*
         * Otherwise, use the (faster) regular lookup.
         */
        error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
                        offset, rflags, &has_rmap);
        if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;
        if (!has_rmap)
                goto out;

        error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
        if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;

out:
        if (!has_rmap)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                        irec->br_startoff);
        return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec,
        xfs_agblock_t           agbno)
{
        struct xfs_rmap_irec    rmap;
        unsigned long long      rmap_end;
        uint64_t                owner;

        if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
                return;

        if (info->whichfork == XFS_COW_FORK)
                owner = XFS_RMAP_OWN_COW;
        else
                owner = info->sc->ip->i_ino;

        /* Find the rmap record for this irec. */
        if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
                return;

        /* Check the rmap. */
        rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
        if (rmap.rm_startblock > agbno ||
            agbno + irec->br_blockcount > rmap_end)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check the logical offsets if applicable.  CoW staging extents
         * don't track logical offsets since the mappings only exist in
         * memory.
         */
        if (info->whichfork != XFS_COW_FORK) {
                rmap_end = (unsigned long long)rmap.rm_offset +
                                rmap.rm_blockcount;
                if (rmap.rm_offset > irec->br_startoff ||
                    irec->br_startoff + irec->br_blockcount > rmap_end)
                        xchk_fblock_xref_set_corrupt(info->sc,
                                        info->whichfork, irec->br_startoff);
        }

        if (rmap.rm_owner != owner)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for discrepancies between the unwritten flag in the irec and
         * the rmap.  Note that the (in-memory) CoW fork distinguishes between
         * unwritten and written extents, but we don't track that in the rmap
         * records because the blocks are owned (on-disk) by the refcountbt,
         * which doesn't track unwritten state.
         */
        if (owner != XFS_RMAP_OWN_COW &&
            irec->br_state == XFS_EXT_UNWRITTEN &&
            !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->whichfork == XFS_ATTR_FORK &&
            !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
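        /*
         * The realtime device has no rmap or refcount btrees to check
         * against, so the only cross-reference we can make here is that the
         * extent is marked as in use in the realtime bitmap.
         */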
        xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
                        irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = info->sc->mp;
        xfs_agnumber_t          agno;
        xfs_agblock_t           agbno;
        xfs_extlen_t            len;
        int                     error;

        agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
        agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
        len = irec->br_blockcount;

        error = xchk_ag_init(info->sc, agno, &info->sc->sa);
        if (!xchk_fblock_process_error(info->sc, info->whichfork,
                        irec->br_startoff, &error))
                return;

        xchk_xref_is_used_space(info->sc, agbno, len);
        xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
        xchk_bmap_xref_rmap(info, irec, agbno);
        switch (info->whichfork) {
        case XFS_DATA_FORK:
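                /*
                 * Data fork extents of a reflink inode may legitimately be
                 * shared, so only unshared data forks and attr forks are
                 * checked against the refcountbt for accidental sharing.
                 */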
                if (xfs_is_reflink_inode(info->sc->ip))
                        break;
                /* fall through */
        case XFS_ATTR_FORK:
                xchk_xref_is_not_shared(info->sc, agbno,
                                irec->br_blockcount);
                break;
        case XFS_COW_FORK:
                xchk_xref_is_cow_staging(info->sc, agbno,
                                irec->br_blockcount);
                break;
        }

        xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by an xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           off;

        if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
                return;

        if (!xfs_verify_dablk(mp, irec->br_startoff))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        off = irec->br_startoff + irec->br_blockcount - 1;
        if (!xfs_verify_dablk(mp, off))
                xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = info->sc->mp;
        xfs_filblks_t           end;
        int                     error = 0;

        /*
         * Check for out-of-order extents.  This record could have come
         * from the incore list, for which there is no ordering check.
         */
        if (irec->br_startoff < info->lastoff)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        xchk_bmap_dirattr_extent(ip, info, irec);

        /* There should never be a "hole" extent in either extent list. */
        if (irec->br_startblock == HOLESTARTBLOCK)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for delalloc extents.  We never iterate the ones in the
         * in-core extent scan, and we should never see these in the bmbt.
         */
        if (isnullstartblock(irec->br_startblock))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* Make sure the extent points to a valid place. */
        if (irec->br_blockcount > MAXEXTLEN)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
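        /* Catch integer overflow when computing the end of the extent. */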
        if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        end = irec->br_startblock + irec->br_blockcount - 1;
        if (info->is_rt &&
            (!xfs_verify_rtbno(mp, irec->br_startblock) ||
             !xfs_verify_rtbno(mp, end)))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (!info->is_rt &&
            (!xfs_verify_fsbno(mp, irec->br_startblock) ||
             !xfs_verify_fsbno(mp, end) ||
             XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
                                XFS_FSB_TO_AGNO(mp, end)))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* We don't allow unwritten extents on attr forks. */
        if (irec->br_state == XFS_EXT_UNWRITTEN &&
            info->whichfork == XFS_ATTR_FORK)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return 0;

        if (info->is_rt)
                xchk_bmap_rt_iextent_xref(ip, info, irec);
        else
                xchk_bmap_iextent_xref(ip, info, irec);

        info->lastoff = irec->br_startoff + irec->br_blockcount;
        return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
        struct xchk_btree       *bs,
        union xfs_btree_rec     *rec)
{
        struct xfs_bmbt_irec    irec;
        struct xfs_bmbt_irec    iext_irec;
        struct xfs_iext_cursor  icur;
        struct xchk_bmap_info   *info = bs->private;
        struct xfs_inode        *ip = bs->cur->bc_ino.ip;
        struct xfs_buf          *bp = NULL;
        struct xfs_btree_block  *block;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, info->whichfork);
        uint64_t                owner;
        int                     i;

        /*
         * Check the owners of the btree blocks up to the level below
         * the root since the verifiers don't do that.
         */
        if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
            bs->cur->bc_ptrs[0] == 1) {
                for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
                        block = xfs_btree_get_block(bs->cur, i, &bp);
                        owner = be64_to_cpu(block->bb_u.l.bb_owner);
                        if (owner != ip->i_ino)
                                xchk_fblock_set_corrupt(bs->sc,
                                                info->whichfork, 0);
                }
        }

        /*
         * Check that the incore extent tree contains an extent that matches
         * this one exactly.  We validate those cached bmaps later, so we don't
         * need to check them here.  If the incore extent tree was just loaded
         * from disk by the scrubber, we assume that its contents match what's
         * on disk (we still hold the ILOCK) and skip the equivalence check.
         */
        if (!info->was_loaded)
                return 0;

        xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
        if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
                                &iext_irec) ||
            irec.br_startoff != iext_irec.br_startoff ||
            irec.br_startblock != iext_irec.br_startblock ||
            irec.br_blockcount != iext_irec.br_blockcount ||
            irec.br_state != iext_irec.br_state)
                xchk_fblock_set_corrupt(bs->sc, info->whichfork,
                                irec.br_startoff);
        return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
        struct xfs_scrub        *sc,
        int                     whichfork,
        struct xchk_bmap_info   *info)
{
        struct xfs_owner_info   oinfo;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip = sc->ip;
        struct xfs_btree_cur    *cur;
        int                     error;

        /* Load the incore bmap cache if it's not loaded. */
        info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
        if (!info->was_loaded) {
                error = xfs_iread_extents(sc->tp, ip, whichfork);
                if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                        goto out;
        }

        /* Check the btree structure. */
        cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
        error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
        xfs_btree_del_cursor(cur, error);
out:
        return error;
}

struct xchk_bmap_check_rmap_info {
        struct xfs_scrub        *sc;
        int                     whichfork;
        struct xfs_iext_cursor  icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
        struct xfs_btree_cur            *cur,
        struct xfs_rmap_irec            *rec,
        void                            *priv)
{
        struct xfs_bmbt_irec            irec;
        struct xchk_bmap_check_rmap_info        *sbcri = priv;
        struct xfs_ifork                *ifp;
        struct xfs_scrub                *sc = sbcri->sc;
        bool                            have_map;

        /* Is this even the right fork? */
        if (rec->rm_owner != sc->ip->i_ino)
                return 0;
        if ((sbcri->whichfork == XFS_ATTR_FORK) ^
            !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
                return 0;
        if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
                return 0;

        /* Now look up the bmbt record. */
        ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
        if (!ifp) {
                xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
                goto out;
        }
        have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
                        &sbcri->icur, &irec);
        if (!have_map)
                xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
        /*
         * bmap extent records are constrained to 2^21 blocks in length
         * because of space constraints in the on-disk metadata structure.
         * However, rmap extent record lengths are constrained only by AG
         * length, so we have to loop through the bmbt to make sure that the
         * entire rmap is covered by bmbt records.
         */
        while (have_map) {
                if (irec.br_startoff != rec->rm_offset)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
                                cur->bc_ag.agno, rec->rm_startblock))
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_blockcount > rec->rm_blockcount)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
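                /* Advance the rmap past the part covered by this bmbt record. */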
                rec->rm_startblock += irec.br_blockcount;
                rec->rm_offset += irec.br_blockcount;
                rec->rm_blockcount -= irec.br_blockcount;
                if (rec->rm_blockcount == 0)
                        break;
                have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
                if (!have_map)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
        }

out:
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return -ECANCELED;
        return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
        struct xfs_scrub                *sc,
        int                             whichfork,
        xfs_agnumber_t                  agno)
{
        struct xchk_bmap_check_rmap_info        sbcri;
        struct xfs_btree_cur            *cur;
        struct xfs_buf                  *agf;
        int                             error;

        error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
        if (error)
                return error;

        cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
        if (!cur) {
                error = -ENOMEM;
                goto out_agf;
        }

        sbcri.sc = sc;
        sbcri.whichfork = whichfork;
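        /* Walk every rmap in this AG, checking each against the bmbt. */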
        error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
        if (error == -ECANCELED)
                error = 0;

        xfs_btree_del_cursor(cur, error);
out_agf:
        xfs_trans_brelse(sc->tp, agf);
        return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
        struct xfs_scrub        *sc,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
        xfs_agnumber_t          agno;
        bool                    zero_size;
        int                     error;

        if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
            whichfork == XFS_COW_FORK ||
            (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return 0;

        /* Don't support realtime rmap checks yet. */
        if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
                return 0;

        ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

        /*
         * Only do this for complex maps that are in btree format, or for
         * situations where we would seem to have a size but zero extents.
         * The inode repair code can zap broken iforks, which means we have
         * to flag this bmap as corrupt if there are rmaps that need to be
         * reattached.
         */

        if (whichfork == XFS_DATA_FORK)
                zero_size = i_size_read(VFS_I(sc->ip)) == 0;
        else
                zero_size = false;

        if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
            (zero_size || ifp->if_nextents > 0))
                return 0;

        for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
                error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
                if (error)
                        return error;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
        }

        return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
        struct xfs_scrub        *sc,
        int                     whichfork)
{
        struct xfs_bmbt_irec    irec;
        struct xchk_bmap_info   info = { NULL };
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip = sc->ip;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        xfs_fileoff_t           endoff;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        /* Non-existent forks can be ignored. */
        if (!ifp)
                goto out;

        info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
        info.whichfork = whichfork;
        info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
        info.sc = sc;

        switch (whichfork) {
        case XFS_COW_FORK:
                /* No CoW forks on non-reflink inodes/filesystems. */
                if (!xfs_is_reflink_inode(ip)) {
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                        goto out;
                }
                break;
        case XFS_ATTR_FORK:
                if (!xfs_sb_version_hasattr(&mp->m_sb) &&
                    !xfs_sb_version_hasattr2(&mp->m_sb))
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                break;
        default:
                ASSERT(whichfork == XFS_DATA_FORK);
                break;
        }

        /* Check the fork values */
        switch (ifp->if_format) {
        case XFS_DINODE_FMT_UUID:
        case XFS_DINODE_FMT_DEV:
        case XFS_DINODE_FMT_LOCAL:
                /* No mappings to check. */
                goto out;
        case XFS_DINODE_FMT_EXTENTS:
                if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                        xchk_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }
                break;
        case XFS_DINODE_FMT_BTREE:
                if (whichfork == XFS_COW_FORK) {
                        xchk_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }

                error = xchk_bmap_btree(sc, whichfork, &info);
                if (error)
                        goto out;
                break;
        default:
                xchk_fblock_set_corrupt(sc, whichfork, 0);
                goto out;
        }

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Find the offset of the last extent in the mapping. */
        error = xfs_bmap_last_offset(ip, &endoff, whichfork);
        if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                goto out;

        /* Scrub extent records. */
        info.lastoff = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        for_each_xfs_iext(ifp, &icur, &irec) {
                if (xchk_should_terminate(sc, &error) ||
                    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                        goto out;
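                /* Delalloc reservations have no physical extent to check. */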
                if (isnullstartblock(irec.br_startblock))
                        continue;
                if (irec.br_startoff >= endoff) {
                        xchk_fblock_set_corrupt(sc, whichfork,
                                        irec.br_startoff);
                        goto out;
                }
                error = xchk_bmap_iextent(ip, &info, &irec);
                if (error)
                        goto out;
        }

        error = xchk_bmap_check_rmaps(sc, whichfork);
        if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
                goto out;
out:
        return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
        struct xfs_scrub        *sc)
{
        return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
        struct xfs_scrub        *sc)
{
        return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
        struct xfs_scrub        *sc)
{
        if (!xfs_is_reflink_inode(sc->ip))
                return -ENOENT;

        return xchk_bmap(sc, XFS_COW_FORK);
}