linux/fs/xfs/scrub/common.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */
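
/*
 * Illustrative sketch (not part of the original file): a scrubber that
 * calls into other kernel code typically wraps each call with
 * xchk_process_error() so that verifier failures become OFLAG_CORRUPT
 * and the scrub keeps going, while operational errors are passed back
 * up.  The agno/agbno/agf_bp names below are stand-ins for whatever the
 * caller happens to be checking:
 *
 *	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *
 * On EFSCORRUPTED/EFSBADCRC the helper returns false with *error reset
 * to zero, so the caller backs out with a zero return and the corruption
 * is reported through sc->sm->sm_flags instead.
 */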

/* Check for operational errors. */
static bool
__xchk_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error,
        __u32                   errflag,
        void                    *ret_ip)
{
        switch (*error) {
        case 0:
                return true;
        case -EDEADLOCK:
                /* Used to restart an op with deadlock avoidance. */
                trace_xchk_deadlock_retry(
                                sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
                                sc->sm, *error);
                break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
                /* Note the badness but don't abort. */
                sc->sm->sm_flags |= errflag;
                *error = 0;
                fallthrough;
        default:
                trace_xchk_op_error(sc, agno, bno, *error,
                                ret_ip);
                break;
        }
        return false;
}

bool
xchk_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error)
{
        return __xchk_process_error(sc, agno, bno, error,
                        XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error)
{
        return __xchk_process_error(sc, agno, bno, error,
                        XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error,
        __u32                   errflag,
        void                    *ret_ip)
{
        switch (*error) {
        case 0:
                return true;
        case -EDEADLOCK:
                /* Used to restart an op with deadlock avoidance. */
                trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
                break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
                /* Note the badness but don't abort. */
                sc->sm->sm_flags |= errflag;
                *error = 0;
                fallthrough;
        default:
                trace_xchk_file_op_error(sc, whichfork, offset, *error,
                                ret_ip);
                break;
        }
        return false;
}

bool
xchk_fblock_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error)
{
        return __xchk_fblock_process_error(sc, whichfork, offset, error,
                        XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error)
{
        return __xchk_fblock_process_error(sc, whichfork, offset, error,
                        XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */
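
/*
 * Illustrative sketch (not part of the original file): a check that
 * spots bad metadata records the problem and keeps going rather than
 * returning an error.  The agf/agf_bp names are stand-ins for whatever
 * structure the scrubber is examining:
 *
 *	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_agblocks)
 *		xchk_block_set_corrupt(sc, agf_bp);
 *
 * xchk_block_set_preen() and xchk_ino_set_warning() are used the same
 * way for the "could be optimized" and "needs administrative review"
 * cases, respectively.
 */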

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
        trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized.  The trace data identifies
 * the inode by its number.
 */
void
xchk_ino_set_preen(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
        trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
        struct xfs_scrub        *sc)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode.  The trace data identifies the inode by its
 * number.
 */
void
xchk_ino_set_corrupt(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about an inode that needs administrative review but is not
 * incorrect.
 */
void
xchk_ino_set_warning(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
        trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
        trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
        struct xfs_scrub        *sc)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
        trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
        const struct xfs_owner_info     *oinfo;
        xfs_filblks_t                   *blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
        struct xfs_btree_cur            *cur,
        const struct xfs_rmap_irec      *rec,
        void                            *priv)
{
        struct xchk_rmap_ownedby_info   *sroi = priv;
        bool                            irec_attr;
        bool                            oinfo_attr;

        irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
        oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

        if (rec->rm_owner != sroi->oinfo->oi_owner)
                return 0;

        if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
                (*sroi->blocks) += rec->rm_blockcount;

        return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
        struct xfs_scrub                *sc,
        struct xfs_btree_cur            *cur,
        const struct xfs_owner_info     *oinfo,
        xfs_filblks_t                   *blocks)
{
        struct xchk_rmap_ownedby_info   sroi = {
                .oinfo                  = oinfo,
                .blocks                 = blocks,
        };

        *blocks = 0;
        return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
                        &sroi);
}
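
/*
 * Illustrative sketch (not part of the original file): a cross-reference
 * check might compare the rmap's idea of AG-owned blocks against its own
 * count.  "expected" is a stand-in for whatever total the caller computed:
 *
 *	xfs_filblks_t	blocks;
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_AG, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (blocks != expected)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */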

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
        struct xfs_scrub        *sc,
        unsigned int            type)
{
        /* Return all AG header read failures when scanning btrees. */
        if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
            sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
            sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
                return true;
        /*
         * If we're scanning a given type of AG header, we only want to
         * see read failures from that specific header.  We'd like the
         * other headers to cross-check them, but this isn't required.
         */
        if (sc->sm->sm_type == type)
                return true;
        return false;
}

/*
 * Grab the perag structure and all the headers for an AG.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.  Returns ENOENT if we can't grab the perag structure.
 */
int
xchk_ag_read_headers(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        struct xchk_ag          *sa)
{
        struct xfs_mount        *mp = sc->mp;
        int                     error;

        ASSERT(!sa->pag);
        sa->pag = xfs_perag_get(mp, agno);
        if (!sa->pag)
                return -ENOENT;

        error = xfs_ialloc_read_agi(mp, sc->tp, agno, &sa->agi_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
                return error;

        error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &sa->agf_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
                return error;

        error = xfs_alloc_read_agfl(mp, sc->tp, agno, &sa->agfl_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
                return error;

        return 0;
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
        struct xchk_ag          *sa)
{
        if (sa->refc_cur)
                xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
        if (sa->rmap_cur)
                xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
        if (sa->fino_cur)
                xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
        if (sa->ino_cur)
                xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
        if (sa->cnt_cur)
                xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
        if (sa->bno_cur)
                xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

        sa->refc_cur = NULL;
        sa->rmap_cur = NULL;
        sa->fino_cur = NULL;
        sa->ino_cur = NULL;
        sa->bno_cur = NULL;
        sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
        struct xfs_scrub        *sc,
        struct xchk_ag          *sa)
{
        struct xfs_mount        *mp = sc->mp;

        if (sa->agf_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
                /* Set up a bnobt cursor for cross-referencing. */
                sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                sa->pag, XFS_BTNUM_BNO);
        }

        if (sa->agf_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
                /* Set up a cntbt cursor for cross-referencing. */
                sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                sa->pag, XFS_BTNUM_CNT);
        }

        /* Set up an inobt cursor for cross-referencing. */
        if (sa->agi_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
                sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
                                sa->pag, XFS_BTNUM_INO);
        }

        /* Set up a finobt cursor for cross-referencing. */
        if (sa->agi_bp && xfs_has_finobt(mp) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
                sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
                                sa->pag, XFS_BTNUM_FINO);
        }

        /* Set up a rmapbt cursor for cross-referencing. */
        if (sa->agf_bp && xfs_has_rmapbt(mp) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
                sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                sa->pag);
        }

        /* Set up a refcountbt cursor for cross-referencing. */
        if (sa->agf_bp && xfs_has_reflink(mp) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
                sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
                                sa->agf_bp, sa->pag);
        }
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
        struct xfs_scrub        *sc,
        struct xchk_ag          *sa)
{
        xchk_ag_btcur_free(sa);
        if (sa->agfl_bp) {
                xfs_trans_brelse(sc->tp, sa->agfl_bp);
                sa->agfl_bp = NULL;
        }
        if (sa->agf_bp) {
                xfs_trans_brelse(sc->tp, sa->agf_bp);
                sa->agf_bp = NULL;
        }
        if (sa->agi_bp) {
                xfs_trans_brelse(sc->tp, sa->agi_bp);
                sa->agi_bp = NULL;
        }
        if (sa->pag) {
                xfs_perag_put(sa->pag);
                sa->pag = NULL;
        }
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order.  Locking order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves.  Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        struct xchk_ag          *sa)
{
        int                     error;

        error = xchk_ag_read_headers(sc, agno, sa);
        if (error)
                return error;

        xchk_ag_btcur_init(sc, sa);
        return 0;
}
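
/*
 * Illustrative sketch (not part of the original file): per-AG scrubbers
 * pair xchk_ag_init() with xchk_ag_free() (typically via the generic scrub
 * teardown) and use the cursors stashed in sc->sa in between:
 *
 *	error = xchk_ag_init(sc, agno, &sc->sa);
 *	if (!xchk_process_error(sc, agno, 0, &error))
 *		return error;
 *	... walk sc->sa.bno_cur, sc->sa.rmap_cur, etc. ...
 *	xchk_ag_free(sc, &sc->sa);
 */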

/* Per-scrubber setup functions */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
        struct xfs_scrub        *sc,
        uint                    resblks)
{
        if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
                return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
                                resblks, 0, 0, &sc->tp);

        return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
        struct xfs_scrub        *sc)
{
        uint                    resblks;

        resblks = xrep_calc_ag_resblks(sc);
        return xchk_trans_alloc(sc, resblks);
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
        struct xfs_scrub        *sc,
        bool                    force_log)
{
        struct xfs_mount        *mp = sc->mp;
        int                     error;

        /*
         * If the caller asks us to checkpoint the log, do so.  This
         * expensive operation should be performed infrequently and only
         * as a last resort.  Any caller that sets force_log should
         * document why they need to do so.
         */
        if (force_log) {
                error = xchk_checkpoint_log(mp);
                if (error)
                        return error;
        }

        error = xchk_setup_fs(sc);
        if (error)
                return error;

        return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_log_force(mp, XFS_LOG_SYNC);
        if (error)
                return error;
        xfs_ail_push_all_sync(mp->m_ail);
        return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xchk_get_inode(
        struct xfs_scrub        *sc)
{
        struct xfs_imap         imap;
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip_in = XFS_I(file_inode(sc->file));
        struct xfs_inode        *ip = NULL;
        int                     error;

        /* We want to scan the inode we already had opened. */
        if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
                sc->ip = ip_in;
                return 0;
        }

        /* Look up the inode, see if the generation number matches. */
        if (xfs_internal_inum(mp, sc->sm->sm_ino))
                return -ENOENT;
        error = xfs_iget(mp, NULL, sc->sm->sm_ino,
                        XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
        switch (error) {
        case -ENOENT:
                /* Inode doesn't exist, just bail out. */
                return error;
        case 0:
                /* Got an inode, continue. */
                break;
        case -EINVAL:
                /*
                 * -EINVAL with IGET_UNTRUSTED could mean one of several
                 * things: userspace gave us an inode number that doesn't
                 * correspond to fs space, or doesn't have an inobt entry;
                 * or it could simply mean that the inode buffer failed the
                 * read verifiers.
                 *
                 * Try just the inode mapping lookup -- if it succeeds, then
                 * the inode buffer verifier failed and something needs fixing.
                 * Otherwise, we really couldn't find it so tell userspace
                 * that it no longer exists.
                 */
                error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
                                XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
                if (error)
                        return -ENOENT;
                error = -EFSCORRUPTED;
                fallthrough;
        default:
                trace_xchk_op_error(sc,
                                XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
                                XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
                                error, __return_address);
                return error;
        }
        if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
                xfs_irele(ip);
                return -ENOENT;
        }

        sc->ip = ip;
        return 0;
}

/* Set us up to scrub a file's contents. */
int
xchk_setup_inode_contents(
        struct xfs_scrub        *sc,
        unsigned int            resblks)
{
        int                     error;

        error = xchk_get_inode(sc);
        if (error)
                return error;

        /* Got the inode, lock it and we're ready to go. */
        sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_ilock(sc->ip, sc->ilock_flags);
        error = xchk_trans_alloc(sc, resblks);
        if (error)
                goto out;
        sc->ilock_flags |= XFS_ILOCK_EXCL;
        xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
        /* scrub teardown will unlock and release the inode for us */
        return error;
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
        struct xfs_scrub        *sc,
        int                     *error,
        struct xfs_btree_cur    **curpp)
{
        /* No point in xref if we already know we're corrupt. */
        if (xchk_skip_xref(sc->sm))
                return false;

        if (*error == 0)
                return true;

        if (curpp) {
                /* If we've already given up on xref, just bail out. */
                if (!*curpp)
                        return false;

                /* xref error, delete cursor and bail out. */
                xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
                *curpp = NULL;
        }

        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
        trace_xchk_xref_error(sc, *error, __return_address);

        /*
         * Errors encountered during cross-referencing with another
         * data structure should not cause this scrubber to abort.
         */
        *error = 0;
        return false;
}
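
/*
 * Illustrative sketch (not part of the original file), modeled on the
 * free-space cross-reference helpers: an xref helper runs a query against
 * the other btree and lets xchk_should_check_xref() decide whether the
 * result is usable.  xfs_alloc_has_record() stands in for whichever query
 * the helper actually needs:
 *
 *	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (is_freesp)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */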

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        xfs_failaddr_t          fa;

        if (bp->b_ops == NULL) {
                xchk_block_set_corrupt(sc, bp);
                return;
        }
        if (bp->b_ops->verify_struct == NULL) {
                xchk_set_incomplete(sc);
                return;
        }
        fa = bp->b_ops->verify_struct(bp);
        if (!fa)
                return;
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
        struct xfs_scrub        *sc)
{
        __u32                   smtype;
        bool                    shared;
        int                     error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return 0;

        /* Metadata inodes don't live on the rt device. */
        if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* They should never participate in reflink. */
        if (xfs_is_reflink_inode(sc->ip)) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* They also should never have extended attributes. */
        if (xfs_inode_hasattr(sc->ip)) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* Invoke the data fork scrubber. */
        smtype = sc->sm->sm_type;
        sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
        error = xchk_bmap_data(sc);
        sc->sm->sm_type = smtype;
        if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return error;

        /* Look for incorrect shared blocks. */
        if (xfs_has_reflink(sc->mp)) {
                error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
                                &shared);
                if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
                                &error))
                        return error;
                if (shared)
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
        }

        return error;
}

/*
 * Try to lock an inode in violation of the usual locking order rules.  For
 * example, trying to get the IOLOCK while in transaction context, or just
 * plain breaking AG-order or inode-order inode locking rules.  Either way,
 * the only way to avoid an ABBA deadlock is to use trylock and back off if
 * we can't.
 */
int
xchk_ilock_inverted(
        struct xfs_inode        *ip,
        uint                    lock_mode)
{
        int                     i;

        for (i = 0; i < 20; i++) {
                if (xfs_ilock_nowait(ip, lock_mode))
                        return 0;
                delay(1);
        }
        return -EDEADLOCK;
}
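
/*
 * Illustrative sketch (not part of the original file): a caller that must
 * take a second inode's lock out of order backs off and retries the whole
 * operation if the trylock loop gives up.  "dp" and the out_rele label are
 * stand-ins for the caller's own context:
 *
 *	error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
 *	if (error)
 *		goto out_rele;
 *
 * The -EDEADLOCK return feeds the same retry path noted in
 * __xchk_process_error() above.
 */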

/* Pause background reaping of resources. */
void
xchk_stop_reaping(
        struct xfs_scrub        *sc)
{
        sc->flags |= XCHK_REAPING_DISABLED;
        xfs_blockgc_stop(sc->mp);
        xfs_inodegc_stop(sc->mp);
}

/* Restart background reaping of resources. */
void
xchk_start_reaping(
        struct xfs_scrub        *sc)
{
        /*
         * Readonly filesystems do not perform inactivation or speculative
         * preallocation, so there's no need to restart the workers.
         */
        if (!xfs_is_readonly(sc->mp)) {
                xfs_inodegc_start(sc->mp);
                xfs_blockgc_start(sc->mp);
        }
        sc->flags &= ~XCHK_REAPING_DISABLED;
}
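
/*
 * Illustrative sketch (not part of the original file): the generic scrub
 * teardown is expected to undo xchk_stop_reaping() by checking the flag it
 * sets, along the lines of:
 *
 *	if (sc->flags & XCHK_REAPING_DISABLED)
 *		xchk_start_reaping(sc);
 */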