linux/fs/xfs/scrub/bmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"

/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
        struct xfs_scrub        *sc)
{
        int                     error;

        error = xchk_get_inode(sc);
        if (error)
                goto out;

        sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_ilock(sc->ip, sc->ilock_flags);

        /*
         * We don't want any ephemeral data fork updates sitting around
         * while we inspect block mappings, so wait for directio to finish
         * and flush dirty data if we have delalloc reservations.
         */
        if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
            sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
                struct address_space    *mapping = VFS_I(sc->ip)->i_mapping;

                inode_dio_wait(VFS_I(sc->ip));

                /*
                 * Try to flush all incore state to disk before we examine the
                 * space mappings for the data fork.  Leave accumulated errors
                 * in the mapping for the writer threads to consume.
                 *
                 * On ENOSPC or EIO writeback errors, we continue into the
                 * extent mapping checks because write failures do not
                 * necessarily imply anything about the correctness of the file
                 * metadata.  The metadata and the file data could be on
                 * completely separate devices; a media failure might only
                 * affect a subset of the disk, etc.  We can handle delalloc
                 * extents in the scrubber, so leaving them in memory is fine.
                 */
                error = filemap_fdatawrite(mapping);
                if (!error)
                        error = filemap_fdatawait_keep_errors(mapping);
                if (error && (error != -ENOSPC && error != -EIO))
                        goto out;
        }

        /* Got the inode, lock it and we're ready to go. */
        error = xchk_trans_alloc(sc, 0);
        if (error)
                goto out;
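        /*
         * Take the ILOCK only now that the scrub transaction has been
         * allocated; allocating the transaction before taking the ILOCK
         * matches the usual XFS ordering.
         */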
        sc->ilock_flags |= XFS_ILOCK_EXCL;
        xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
        /* scrub teardown will unlock and release the inode */
        return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
        struct xfs_scrub        *sc;            /* scrub context */
        xfs_fileoff_t           lastoff;        /* end of the previous extent */
        bool                    is_rt;          /* realtime file data fork? */
        bool                    is_shared;      /* reflink file data fork? */
        bool                    was_loaded;     /* iext tree already in memory? */
        int                     whichfork;      /* which fork we're scrubbing */
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec,
        xfs_agblock_t           agbno,
        uint64_t                owner,
        struct xfs_rmap_irec    *rmap)
{
        xfs_fileoff_t           offset;
        unsigned int            rflags = 0;
        int                     has_rmap;
        int                     error;

        if (info->whichfork == XFS_ATTR_FORK)
                rflags |= XFS_RMAP_ATTR_FORK;
        if (irec->br_state == XFS_EXT_UNWRITTEN)
                rflags |= XFS_RMAP_UNWRITTEN;

        /*
         * CoW staging extents are owned (on disk) by the refcountbt, so
         * their rmaps do not have offsets.
         */
        if (info->whichfork == XFS_COW_FORK)
                offset = 0;
        else
                offset = irec->br_startoff;

        /*
         * If the caller thinks this could be a shared bmbt extent (IOWs,
         * any data fork extent of a reflink inode) then we have to use the
         * range rmap lookup to make sure we get the correct owner/offset.
         */
        if (info->is_shared) {
                error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
                                owner, offset, rflags, rmap, &has_rmap);
                if (!xchk_should_check_xref(info->sc, &error,
                                &info->sc->sa.rmap_cur))
                        return false;
                goto out;
        }

        /*
         * Otherwise, use the (faster) regular lookup.
         */
        error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
                        offset, rflags, &has_rmap);
        if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;
        if (!has_rmap)
                goto out;

        error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
        if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;

out:
        if (!has_rmap)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                        irec->br_startoff);
        return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec,
        xfs_agblock_t           agbno)
{
        struct xfs_rmap_irec    rmap;
        unsigned long long      rmap_end;
        uint64_t                owner;

        if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
                return;

        if (info->whichfork == XFS_COW_FORK)
                owner = XFS_RMAP_OWN_COW;
        else
                owner = info->sc->ip->i_ino;

        /* Find the rmap record for this irec. */
        if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
                return;

        /* Check the rmap. */
        rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
        if (rmap.rm_startblock > agbno ||
            agbno + irec->br_blockcount > rmap_end)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check the logical offsets if applicable.  CoW staging extents
         * don't track logical offsets since the mappings only exist in
         * memory.
         */
        if (info->whichfork != XFS_COW_FORK) {
                rmap_end = (unsigned long long)rmap.rm_offset +
                                rmap.rm_blockcount;
                if (rmap.rm_offset > irec->br_startoff ||
                    irec->br_startoff + irec->br_blockcount > rmap_end)
                        xchk_fblock_xref_set_corrupt(info->sc,
                                        info->whichfork, irec->br_startoff);
        }

        if (rmap.rm_owner != owner)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for discrepancies between the unwritten flag in the irec and
         * the rmap.  Note that the (in-memory) CoW fork distinguishes between
         * unwritten and written extents, but we don't track that in the rmap
         * records because the blocks are owned (on-disk) by the refcountbt,
         * which doesn't track unwritten state.
         */
        if (owner != XFS_RMAP_OWN_COW &&
            !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
            !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (!!(info->whichfork == XFS_ATTR_FORK) !=
            !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
                xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
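        /*
         * At this point there are no rmap or refcount btrees for the
         * realtime device, so the only cross-reference we can make is
         * against the realtime bitmap.
         */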
        xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
                        irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = info->sc->mp;
        xfs_agnumber_t          agno;
        xfs_agblock_t           agbno;
        xfs_extlen_t            len;
        int                     error;

        agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
        agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
        len = irec->br_blockcount;

        error = xchk_ag_init_existing(info->sc, agno, &info->sc->sa);
        if (!xchk_fblock_process_error(info->sc, info->whichfork,
                        irec->br_startoff, &error))
                goto out_free;

        xchk_xref_is_used_space(info->sc, agbno, len);
        xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
        xchk_bmap_xref_rmap(info, irec, agbno);
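        /*
         * Only the data fork of a reflink file is allowed to map shared
         * blocks, so skip the sharing check in that case.  Attr fork blocks
         * and the data blocks of non-reflink files must never be shared,
         * and CoW fork mappings must point at CoW staging extents.
         */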
        switch (info->whichfork) {
        case XFS_DATA_FORK:
                if (xfs_is_reflink_inode(info->sc->ip))
                        break;
                fallthrough;
        case XFS_ATTR_FORK:
                xchk_xref_is_not_shared(info->sc, agbno,
                                irec->br_blockcount);
                break;
        case XFS_COW_FORK:
                xchk_xref_is_cow_staging(info->sc, agbno,
                                irec->br_blockcount);
                break;
        }

out_free:
        xchk_ag_free(info->sc, &info->sc->sa);
}

/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by an xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           off;

        if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
                return;

        if (!xfs_verify_dablk(mp, irec->br_startoff))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        off = irec->br_startoff + irec->br_blockcount - 1;
        if (!xfs_verify_dablk(mp, off))
                xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}

/* Scrub a single extent record. */
STATIC int
xchk_bmap_iextent(
        struct xfs_inode        *ip,
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
{
        struct xfs_mount        *mp = info->sc->mp;
        int                     error = 0;

        /*
         * Check for out-of-order extents.  This record could have come
         * from the incore list, for which there is no ordering check.
         */
        if (irec->br_startoff < info->lastoff)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        xchk_bmap_dirattr_extent(ip, info, irec);

        /* There should never be a "hole" extent in either extent list. */
        if (irec->br_startblock == HOLESTARTBLOCK)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for delalloc extents.  We never iterate the ones in the
         * in-core extent scan, and we should never see these in the bmbt.
         */
        if (isnullstartblock(irec->br_startblock))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* Make sure the extent points to a valid place. */
        if (irec->br_blockcount > MAXEXTLEN)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (info->is_rt &&
            !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (!info->is_rt &&
            !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* We don't allow unwritten extents on attr forks. */
        if (irec->br_state == XFS_EXT_UNWRITTEN &&
            info->whichfork == XFS_ATTR_FORK)
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return 0;

        if (info->is_rt)
                xchk_bmap_rt_iextent_xref(ip, info, irec);
        else
                xchk_bmap_iextent_xref(ip, info, irec);

        info->lastoff = irec->br_startoff + irec->br_blockcount;
        return error;
}

/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
        struct xchk_btree       *bs,
        const union xfs_btree_rec *rec)
{
        struct xfs_bmbt_irec    irec;
        struct xfs_bmbt_irec    iext_irec;
        struct xfs_iext_cursor  icur;
        struct xchk_bmap_info   *info = bs->private;
        struct xfs_inode        *ip = bs->cur->bc_ino.ip;
        struct xfs_buf          *bp = NULL;
        struct xfs_btree_block  *block;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, info->whichfork);
        uint64_t                owner;
        int                     i;

        /*
         * Check the owners of the btree blocks up to the level below
         * the root since the verifiers don't do that.  Do this only when
         * the cursor is on the first record of a leaf block so that the
         * walk happens once per leaf block, not once per record.
         */
        if (xfs_has_crc(bs->cur->bc_mp) &&
            bs->cur->bc_levels[0].ptr == 1) {
                for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
                        block = xfs_btree_get_block(bs->cur, i, &bp);
                        owner = be64_to_cpu(block->bb_u.l.bb_owner);
                        if (owner != ip->i_ino)
                                xchk_fblock_set_corrupt(bs->sc,
                                                info->whichfork, 0);
                }
        }

        /*
         * Check that the incore extent tree contains an extent that matches
         * this one exactly.  We validate those cached bmaps later, so we don't
         * need to check them here.  If the incore extent tree was just loaded
         * from disk by the scrubber, we assume that its contents match what's
         * on disk (we still hold the ILOCK) and skip the equivalence check.
         */
        if (!info->was_loaded)
                return 0;

        xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
        if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
                                &iext_irec) ||
            irec.br_startoff != iext_irec.br_startoff ||
            irec.br_startblock != iext_irec.br_startblock ||
            irec.br_blockcount != iext_irec.br_blockcount ||
            irec.br_state != iext_irec.br_state)
                xchk_fblock_set_corrupt(bs->sc, info->whichfork,
                                irec.br_startoff);
        return 0;
}

/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
        struct xfs_scrub        *sc,
        int                     whichfork,
        struct xchk_bmap_info   *info)
{
        struct xfs_owner_info   oinfo;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip = sc->ip;
        struct xfs_btree_cur    *cur;
        int                     error;

        /* Load the incore bmap cache if it's not loaded. */
        info->was_loaded = !xfs_need_iread_extents(ifp);

        error = xfs_iread_extents(sc->tp, ip, whichfork);
        if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                goto out;

        /* Check the btree structure. */
        cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
        error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
        xfs_btree_del_cursor(cur, error);
out:
        return error;
}

struct xchk_bmap_check_rmap_info {
        struct xfs_scrub        *sc;            /* scrub context */
        int                     whichfork;      /* fork being cross-referenced */
        struct xfs_iext_cursor  icur;           /* incore extent tree cursor */
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
        struct xfs_btree_cur            *cur,
        const struct xfs_rmap_irec      *rec,
        void                            *priv)
{
        struct xfs_bmbt_irec            irec;
        struct xfs_rmap_irec            check_rec;
        struct xchk_bmap_check_rmap_info        *sbcri = priv;
        struct xfs_ifork                *ifp;
        struct xfs_scrub                *sc = sbcri->sc;
        bool                            have_map;

        /* Is this even the right fork? */
        if (rec->rm_owner != sc->ip->i_ino)
                return 0;
        if ((sbcri->whichfork == XFS_ATTR_FORK) ^
            !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
                return 0;
        if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
                return 0;

        /* Now look up the bmbt record. */
        ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
        if (!ifp) {
                xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
                goto out;
        }
        have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
                        &sbcri->icur, &irec);
        if (!have_map)
                xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
        /*
         * bmap extent record lengths are constrained to 2^21 blocks in length
         * because of space constraints in the on-disk metadata structure.
         * However, rmap extent record lengths are constrained only by AG
         * length, so we have to loop through the bmbt to make sure that the
         * entire rmap is covered by bmbt records.
         */
        check_rec = *rec;
        while (have_map) {
                if (irec.br_startoff != check_rec.rm_offset)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        check_rec.rm_offset);
                if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
                                cur->bc_ag.pag->pag_agno,
                                check_rec.rm_startblock))
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        check_rec.rm_offset);
                if (irec.br_blockcount > check_rec.rm_blockcount)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        check_rec.rm_offset);
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
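                /*
                 * Trim off the part of the rmap that this bmbt record
                 * covered, then walk to the next bmbt record to check the
                 * remainder of the rmap.
                 */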
                check_rec.rm_startblock += irec.br_blockcount;
                check_rec.rm_offset += irec.br_blockcount;
                check_rec.rm_blockcount -= irec.br_blockcount;
                if (check_rec.rm_blockcount == 0)
                        break;
                have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
                if (!have_map)
                        xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        check_rec.rm_offset);
        }

out:
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return -ECANCELED;
        return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
        struct xfs_scrub                *sc,
        int                             whichfork,
        struct xfs_perag                *pag)
{
        struct xchk_bmap_check_rmap_info        sbcri;
        struct xfs_btree_cur            *cur;
        struct xfs_buf                  *agf;
        int                             error;

        error = xfs_alloc_read_agf(sc->mp, sc->tp, pag->pag_agno, 0, &agf);
        if (error)
                return error;

        cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);

        sbcri.sc = sc;
        sbcri.whichfork = whichfork;
        error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
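        /*
         * xchk_bmap_check_rmap returns -ECANCELED once corruption has been
         * flagged so that the rmap query stops early; that isn't a real
         * error, so clear it here.
         */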
        if (error == -ECANCELED)
                error = 0;

        xfs_btree_del_cursor(cur, error);
        xfs_trans_brelse(sc->tp, agf);
        return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
        struct xfs_scrub        *sc,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno;
        bool                    zero_size;
        int                     error;

        if (!xfs_has_rmapbt(sc->mp) ||
            whichfork == XFS_COW_FORK ||
            (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return 0;

        /* Don't support realtime rmap checks yet. */
        if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
                return 0;

        ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);

        /*
         * Only do this for complex maps that are in btree format, or for
         * situations where we would seem to have a size but zero extents.
         * The inode repair code can zap broken iforks, which means we have
         * to flag this bmap as corrupt if there are rmaps that need to be
         * reattached.
         */

        if (whichfork == XFS_DATA_FORK)
                zero_size = i_size_read(VFS_I(sc->ip)) == 0;
        else
                zero_size = false;

        if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
            (zero_size || ifp->if_nextents > 0))
                return 0;

        for_each_perag(sc->mp, agno, pag) {
                error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
                if (error)
                        break;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
        }
        if (pag)
                xfs_perag_put(pag);
        return error;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
        struct xfs_scrub        *sc,
        int                     whichfork)
{
        struct xfs_bmbt_irec    irec;
        struct xchk_bmap_info   info = { NULL };
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip = sc->ip;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        xfs_fileoff_t           endoff;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        /* Non-existent forks can be ignored. */
        if (!ifp)
                goto out;

        info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
        info.whichfork = whichfork;
        info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
        info.sc = sc;

        switch (whichfork) {
        case XFS_COW_FORK:
                /* No CoW forks on non-reflink inodes/filesystems. */
                if (!xfs_is_reflink_inode(ip)) {
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                        goto out;
                }
                break;
        case XFS_ATTR_FORK:
                if (!xfs_has_attr(mp) && !xfs_has_attr2(mp))
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                break;
        default:
                ASSERT(whichfork == XFS_DATA_FORK);
                break;
        }

        /* Check the fork values */
        switch (ifp->if_format) {
        case XFS_DINODE_FMT_UUID:
        case XFS_DINODE_FMT_DEV:
        case XFS_DINODE_FMT_LOCAL:
                /* No mappings to check. */
                goto out;
        case XFS_DINODE_FMT_EXTENTS:
                break;
        case XFS_DINODE_FMT_BTREE:
                if (whichfork == XFS_COW_FORK) {
                        xchk_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }

                error = xchk_bmap_btree(sc, whichfork, &info);
                if (error)
                        goto out;
                break;
        default:
                xchk_fblock_set_corrupt(sc, whichfork, 0);
                goto out;
        }

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Find the offset of the last extent in the mapping. */
        error = xfs_bmap_last_offset(ip, &endoff, whichfork);
        if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                goto out;

        /* Scrub extent records. */
        info.lastoff = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        for_each_xfs_iext(ifp, &icur, &irec) {
                if (xchk_should_terminate(sc, &error) ||
                    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                        goto out;
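                /*
                 * Delalloc extents are legal in the incore extent list but
                 * have no on-disk mapping to cross-reference, so skip them.
                 */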
                if (isnullstartblock(irec.br_startblock))
                        continue;
                if (irec.br_startoff >= endoff) {
                        xchk_fblock_set_corrupt(sc, whichfork,
                                        irec.br_startoff);
                        goto out;
                }
                error = xchk_bmap_iextent(ip, &info, &irec);
                if (error)
                        goto out;
        }

        error = xchk_bmap_check_rmaps(sc, whichfork);
        if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
                goto out;
out:
        return error;
}

/* Scrub an inode's data fork. */
int
xchk_bmap_data(
        struct xfs_scrub        *sc)
{
        return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
        struct xfs_scrub        *sc)
{
        return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
        struct xfs_scrub        *sc)
{
        if (!xfs_is_reflink_inode(sc->ip))
                return -ENOENT;

        return xchk_bmap(sc, XFS_COW_FORK);
}