linux/fs/xfs/scrub/common.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */
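
/*
 * A minimal sketch of how a scrubber typically consumes these helpers.
 * This is illustrative only; the xchk_example_* names are hypothetical
 * and not part of this file:
 *
 *	int
 *	xchk_example_check(
 *		struct xfs_scrub	*sc,
 *		xfs_agnumber_t		agno,
 *		xfs_agblock_t		agbno)
 *	{
 *		int			error;
 *
 *		error = xchk_example_read_metadata(sc, agno, agbno);
 *		if (!xchk_process_error(sc, agno, agbno, &error))
 *			return error;	// zero if it was a verifier error
 *
 *		// ...run the actual checks here...
 *		return 0;
 *	}
 */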

/* Check for operational errors. */
static bool
__xchk_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error,
        __u32                   errflag,
        void                    *ret_ip)
{
        switch (*error) {
        case 0:
                return true;
        case -EDEADLOCK:
                /* Used to restart an op with deadlock avoidance. */
                trace_xchk_deadlock_retry(
                                sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
                                sc->sm, *error);
                break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
                /* Note the badness but don't abort. */
                sc->sm->sm_flags |= errflag;
                *error = 0;
                /* fall through */
        default:
                trace_xchk_op_error(sc, agno, bno, *error,
                                ret_ip);
                break;
        }
        return false;
}

bool
xchk_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error)
{
        return __xchk_process_error(sc, agno, bno, error,
                        XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
        int                     *error)
{
        return __xchk_process_error(sc, agno, bno, error,
                        XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error,
        __u32                   errflag,
        void                    *ret_ip)
{
        switch (*error) {
        case 0:
                return true;
        case -EDEADLOCK:
                /* Used to restart an op with deadlock avoidance. */
                trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
                break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
                /* Note the badness but don't abort. */
                sc->sm->sm_flags |= errflag;
                *error = 0;
                /* fall through */
        default:
                trace_xchk_file_op_error(sc, whichfork, offset, *error,
                                ret_ip);
                break;
        }
        return false;
}

bool
xchk_fblock_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error)
{
        return __xchk_fblock_process_error(sc, whichfork, offset, error,
                        XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset,
        int                     *error)
{
        return __xchk_fblock_process_error(sc, whichfork, offset, error,
                        XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */
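
/*
 * A minimal sketch of how a check might record corruption, for
 * illustration only; the surrounding context (the agf pointer read out
 * of sa->agf_bp) is hypothetical:
 *
 *	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_agblocks)
 *		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
 *
 * The check keeps going afterwards; only the OFLAG_* flags and the
 * trace data carry the verdict back to userspace.
 */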

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
        trace_xchk_block_preen(sc, bp->b_bn, __return_address);
}

/* Record an inode which could be optimized. */
void
xchk_ino_set_preen(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
        trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
        struct xfs_scrub        *sc)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corrupt inode. */
void
xchk_ino_set_corrupt(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
        trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
        struct xfs_scrub        *sc,
        xfs_ino_t               ino)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
        trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
        struct xfs_scrub        *sc,
        int                     whichfork,
        xfs_fileoff_t           offset)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
        trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
        struct xfs_scrub        *sc)
{
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
        trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
        const struct xfs_owner_info     *oinfo;
        xfs_filblks_t                   *blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
        struct xfs_btree_cur            *cur,
        struct xfs_rmap_irec            *rec,
        void                            *priv)
{
        struct xchk_rmap_ownedby_info   *sroi = priv;
        bool                            irec_attr;
        bool                            oinfo_attr;

        irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
        oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

        if (rec->rm_owner != sroi->oinfo->oi_owner)
                return 0;

        if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
                (*sroi->blocks) += rec->rm_blockcount;

        return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
        struct xfs_scrub                *sc,
        struct xfs_btree_cur            *cur,
        const struct xfs_owner_info     *oinfo,
        xfs_filblks_t                   *blocks)
{
        struct xchk_rmap_ownedby_info   sroi = {
                .oinfo                  = oinfo,
                .blocks                 = blocks,
        };

        *blocks = 0;
        return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
                        &sroi);
}
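
/*
 * A rough sketch of a typical caller of the counting helper above, for
 * illustration only; the "expected" value it is compared against is
 * hypothetical and comes from whatever the scrubber is checking:
 *
 *	xfs_filblks_t	blocks;
 *	int		error;
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_AG, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (blocks != expected)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */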

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
        struct xfs_scrub        *sc,
        unsigned int            type)
{
        /* Return all AG header read failures when scanning btrees. */
        if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
            sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
            sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
                return true;
        /*
         * If we're scanning a given type of AG header, we only want to
         * see read failures from that specific header.  We'd like the
         * other headers to cross-check them, but this isn't required.
         */
        if (sc->sm->sm_type == type)
                return true;
        return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xchk_ag_free, but as a fail-safe
 * we attach all the buffers we grab to the scrub transaction so
 * they'll all be freed when we cancel it.
 */
int
xchk_ag_read_headers(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        struct xchk_ag          *sa)
{
        struct xfs_mount        *mp = sc->mp;
        int                     error;

        sa->agno = agno;

        error = xfs_ialloc_read_agi(mp, sc->tp, agno, &sa->agi_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
                goto out;

        error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &sa->agf_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
                goto out;

        error = xfs_alloc_read_agfl(mp, sc->tp, agno, &sa->agfl_bp);
        if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
                goto out;
        error = 0;
out:
        return error;
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
        struct xchk_ag          *sa)
{
        if (sa->refc_cur)
                xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
        if (sa->rmap_cur)
                xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
        if (sa->fino_cur)
                xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
        if (sa->ino_cur)
                xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
        if (sa->cnt_cur)
                xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
        if (sa->bno_cur)
                xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

        sa->refc_cur = NULL;
        sa->rmap_cur = NULL;
        sa->fino_cur = NULL;
        sa->ino_cur = NULL;
        sa->bno_cur = NULL;
        sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
        struct xfs_scrub        *sc,
        struct xchk_ag          *sa)
{
        struct xfs_mount        *mp = sc->mp;
        xfs_agnumber_t          agno = sa->agno;

        xchk_perag_get(sc->mp, sa);
        if (sa->agf_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
                /* Set up a bnobt cursor for cross-referencing. */
                sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                agno, XFS_BTNUM_BNO);
        }

        if (sa->agf_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
                /* Set up a cntbt cursor for cross-referencing. */
                sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                agno, XFS_BTNUM_CNT);
        }

        /* Set up an inobt cursor for cross-referencing. */
        if (sa->agi_bp &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
                sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
                                        agno, XFS_BTNUM_INO);
        }

        /* Set up a finobt cursor for cross-referencing. */
        if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
                sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
                                agno, XFS_BTNUM_FINO);
        }

        /* Set up a rmapbt cursor for cross-referencing. */
        if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
                sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
                                agno);
        }

        /* Set up a refcountbt cursor for cross-referencing. */
        if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
            xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
                sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
                                sa->agf_bp, agno);
        }
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
        struct xfs_scrub        *sc,
        struct xchk_ag          *sa)
{
        xchk_ag_btcur_free(sa);
        if (sa->agfl_bp) {
                xfs_trans_brelse(sc->tp, sa->agfl_bp);
                sa->agfl_bp = NULL;
        }
        if (sa->agf_bp) {
                xfs_trans_brelse(sc->tp, sa->agf_bp);
                sa->agf_bp = NULL;
        }
        if (sa->agi_bp) {
                xfs_trans_brelse(sc->tp, sa->agi_bp);
                sa->agi_bp = NULL;
        }
        if (sa->pag) {
                xfs_perag_put(sa->pag);
                sa->pag = NULL;
        }
        sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
 * order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xchk_ag_init(
        struct xfs_scrub        *sc,
        xfs_agnumber_t          agno,
        struct xchk_ag          *sa)
{
        int                     error;

        error = xchk_ag_read_headers(sc, agno, sa);
        if (error)
                return error;

        xchk_ag_btcur_init(sc, sa);
        return 0;
}
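
/*
 * A rough sketch of the AG setup/use/teardown lifecycle, for
 * illustration only (the middle of the sketch is hypothetical):
 *
 *	error = xchk_ag_init(sc, agno, &sc->sa);
 *	if (!xchk_process_error(sc, agno, 0, &error))
 *		return error;
 *
 *	// ...walk sc->sa.bno_cur, sc->sa.rmap_cur, etc. as needed...
 *
 *	xchk_ag_free(sc, &sc->sa);
 *
 * Scrubbers don't necessarily call xchk_ag_free themselves; the scrub
 * teardown path releases whatever is still attached to sc->sa.
 */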

/*
 * Grab the per-ag structure if we haven't already gotten it.  Teardown of the
 * xchk_ag will release it for us.
 */
void
xchk_perag_get(
        struct xfs_mount        *mp,
        struct xchk_ag          *sa)
{
        if (!sa->pag)
                sa->pag = xfs_perag_get(mp, sa->agno);
}

/* Per-scrubber setup functions */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
        struct xfs_scrub        *sc,
        uint                    resblks)
{
        if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
                return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
                                resblks, 0, 0, &sc->tp);

        return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
        struct xfs_scrub        *sc)
{
        uint                    resblks;

        resblks = xrep_calc_ag_resblks(sc);
        return xchk_trans_alloc(sc, resblks);
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
        struct xfs_scrub        *sc,
        bool                    force_log)
{
        struct xfs_mount        *mp = sc->mp;
        int                     error;

        /*
         * If the caller asks us to checkpoint the log, do so.  This
         * expensive operation should be performed infrequently and only
         * as a last resort.  Any caller that sets force_log should
         * document why they need to do so.
         */
        if (force_log) {
                error = xchk_checkpoint_log(mp);
                if (error)
                        return error;
        }

        error = xchk_setup_fs(sc);
        if (error)
                return error;

        return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_log_force(mp, XFS_LOG_SYNC);
        if (error)
                return error;
        xfs_ail_push_all_sync(mp->m_ail);
        return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xchk_get_inode(
        struct xfs_scrub        *sc)
{
        struct xfs_imap         imap;
        struct xfs_mount        *mp = sc->mp;
        struct xfs_inode        *ip_in = XFS_I(file_inode(sc->file));
        struct xfs_inode        *ip = NULL;
        int                     error;

        /* We want to scan the inode we already had opened. */
        if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
                sc->ip = ip_in;
                return 0;
        }

        /* Look up the inode, see if the generation number matches. */
        if (xfs_internal_inum(mp, sc->sm->sm_ino))
                return -ENOENT;
        error = xfs_iget(mp, NULL, sc->sm->sm_ino,
                        XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
        switch (error) {
        case -ENOENT:
                /* Inode doesn't exist, just bail out. */
                return error;
        case 0:
                /* Got an inode, continue. */
                break;
        case -EINVAL:
                /*
                 * -EINVAL with IGET_UNTRUSTED could mean one of several
                 * things: userspace gave us an inode number that doesn't
                 * correspond to fs space, or doesn't have an inobt entry;
                 * or it could simply mean that the inode buffer failed the
                 * read verifiers.
                 *
                 * Try just the inode mapping lookup -- if it succeeds, then
                 * the inode buffer verifier failed and something needs fixing.
                 * Otherwise, we really couldn't find it so tell userspace
                 * that it no longer exists.
                 */
                error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
                                XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
                if (error)
                        return -ENOENT;
                error = -EFSCORRUPTED;
                /* fall through */
        default:
                trace_xchk_op_error(sc,
                                XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
                                XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
                                error, __return_address);
                return error;
        }
        if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
                xfs_irele(ip);
                return -ENOENT;
        }

        sc->ip = ip;
        return 0;
}

/* Set us up to scrub a file's contents. */
int
xchk_setup_inode_contents(
        struct xfs_scrub        *sc,
        unsigned int            resblks)
{
        int                     error;

        error = xchk_get_inode(sc);
        if (error)
                return error;

        /* Got the inode, lock it and we're ready to go. */
        sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_ilock(sc->ip, sc->ilock_flags);
        error = xchk_trans_alloc(sc, resblks);
        if (error)
                goto out;
        sc->ilock_flags |= XFS_ILOCK_EXCL;
        xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
        /* scrub teardown will unlock and release the inode for us */
        return error;
}
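
/*
 * A minimal sketch of a per-scrubber setup function built on the helper
 * above; the xchk_setup_example name and the resblks value of zero are
 * hypothetical:
 *
 *	int
 *	xchk_setup_example(
 *		struct xfs_scrub	*sc)
 *	{
 *		return xchk_setup_inode_contents(sc, 0);
 *	}
 *
 * Note the lock ordering the helper enforces: IOLOCK and MMAPLOCK are
 * taken before the transaction is allocated, and the ILOCK only after.
 */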

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
        struct xfs_scrub        *sc,
        int                     *error,
        struct xfs_btree_cur    **curpp)
{
        /* No point in xref if we already know we're corrupt. */
        if (xchk_skip_xref(sc->sm))
                return false;

        if (*error == 0)
                return true;

        if (curpp) {
                /* If we've already given up on xref, just bail out. */
                if (!*curpp)
                        return false;

                /* xref error, delete cursor and bail out. */
                xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
                *curpp = NULL;
        }

        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
        trace_xchk_xref_error(sc, *error, __return_address);

        /*
         * Errors encountered during cross-referencing with another
         * data structure should not cause this scrubber to abort.
         */
        *error = 0;
        return false;
}
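
/*
 * A rough sketch of the usual cross-reference pattern, for illustration
 * only; agbno and has_rmap are locals in a hypothetical caller:
 *
 *	error = xfs_rmap_record_exists(sc->sa.rmap_cur, agbno, 1,
 *			&XFS_RMAP_OINFO_FS, &has_rmap);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (!has_rmap)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 *
 * If the rmapbt lookup fails, the predicate above tears down the cursor,
 * sets OFLAG_XFAIL, clears the error, and the check is skipped.
 */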

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
        struct xfs_scrub        *sc,
        struct xfs_buf          *bp)
{
        xfs_failaddr_t          fa;

        if (bp->b_ops == NULL) {
                xchk_block_set_corrupt(sc, bp);
                return;
        }
        if (bp->b_ops->verify_struct == NULL) {
                xchk_set_incomplete(sc);
                return;
        }
        fa = bp->b_ops->verify_struct(bp);
        if (!fa)
                return;
        sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
        trace_xchk_block_error(sc, bp->b_bn, fa);
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
        struct xfs_scrub        *sc)
{
        __u32                   smtype;
        bool                    shared;
        int                     error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return 0;

        /* Metadata inodes don't live on the rt device. */
        if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* They should never participate in reflink. */
        if (xfs_is_reflink_inode(sc->ip)) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* They also should never have extended attributes. */
        if (xfs_inode_hasattr(sc->ip)) {
                xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                return 0;
        }

        /* Invoke the data fork scrubber. */
        smtype = sc->sm->sm_type;
        sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
        error = xchk_bmap_data(sc);
        sc->sm->sm_type = smtype;
        if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return error;

        /* Look for incorrect shared blocks. */
        if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
                error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
                                &shared);
                if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
                                &error))
                        return error;
                if (shared)
                        xchk_ino_set_corrupt(sc, sc->ip->i_ino);
        }

        return error;
}

/*
 * Try to lock an inode in violation of the usual locking order rules.  For
 * example, trying to get the IOLOCK while in transaction context, or just
 * plain breaking AG-order or inode-order inode locking rules.  Either way,
 * the only way to avoid an ABBA deadlock is to use trylock and back off if
 * we can't.
 */
int
xchk_ilock_inverted(
        struct xfs_inode        *ip,
        uint                    lock_mode)
{
        int                     i;

        for (i = 0; i < 20; i++) {
                if (xfs_ilock_nowait(ip, lock_mode))
                        return 0;
                delay(1);
        }
        return -EDEADLOCK;
}
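
/*
 * A rough sketch of how a caller might react when the trylock loop above
 * gives up; illustrative only, and the retry label and the resources
 * being dropped are hypothetical:
 *
 *	error = xchk_ilock_inverted(ip, XFS_ILOCK_EXCL);
 *	if (error) {
 *		// -EDEADLOCK: drop whatever locks or buffers caused the
 *		// inversion, then restart the operation from the top
 *		goto retry;
 *	}
 *
 * Scrubbers typically let -EDEADLOCK propagate instead, so that the
 * error-processing helpers at the top of this file can trigger a retry
 * of the whole scrub operation.
 */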

/* Pause background reaping of resources. */
void
xchk_stop_reaping(
        struct xfs_scrub        *sc)
{
        sc->flags |= XCHK_REAPING_DISABLED;
        xfs_blockgc_stop(sc->mp);
}

/* Restart background reaping of resources. */
void
xchk_start_reaping(
        struct xfs_scrub        *sc)
{
        xfs_blockgc_start(sc->mp);
        sc->flags &= ~XCHK_REAPING_DISABLED;
}