linux/fs/xfs/xfs_log_recover.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_defer.h"
  16#include "xfs_inode.h"
  17#include "xfs_trans.h"
  18#include "xfs_log.h"
  19#include "xfs_log_priv.h"
  20#include "xfs_log_recover.h"
  21#include "xfs_inode_item.h"
  22#include "xfs_extfree_item.h"
  23#include "xfs_trans_priv.h"
  24#include "xfs_alloc.h"
  25#include "xfs_ialloc.h"
  26#include "xfs_quota.h"
  27#include "xfs_trace.h"
  28#include "xfs_icache.h"
  29#include "xfs_bmap_btree.h"
  30#include "xfs_error.h"
  31#include "xfs_dir2.h"
  32#include "xfs_rmap_item.h"
  33#include "xfs_buf_item.h"
  34#include "xfs_refcount_item.h"
  35#include "xfs_bmap_item.h"
  36
  37#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)
  38
  39STATIC int
  40xlog_find_zeroed(
  41        struct xlog     *,
  42        xfs_daddr_t     *);
  43STATIC int
  44xlog_clear_stale_blocks(
  45        struct xlog     *,
  46        xfs_lsn_t);
  47#if defined(DEBUG)
  48STATIC void
  49xlog_recover_check_summary(
  50        struct xlog *);
  51#else
  52#define xlog_recover_check_summary(log)
  53#endif
  54STATIC int
  55xlog_do_recovery_pass(
  56        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
  57
  58/*
  59 * This structure is used during recovery to record the buf log items which
  60 * have been canceled and should not be replayed.
  61 */
  62struct xfs_buf_cancel {
  63        xfs_daddr_t             bc_blkno;
  64        uint                    bc_len;
  65        int                     bc_refcount;
  66        struct list_head        bc_list;
  67};
  68
  69/*
  70 * Sector aligned buffer routines for buffer create/read/write/access
  71 */
  72
  73/*
  74 * Verify the log-relative block number and length in basic blocks are valid for
  75 * an operation involving the given XFS log buffer. Returns true if the fields
  76 * are valid, false otherwise.
  77 */
  78static inline bool
  79xlog_verify_bno(
  80        struct xlog     *log,
  81        xfs_daddr_t     blk_no,
  82        int             bbcount)
  83{
  84        if (blk_no < 0 || blk_no >= log->l_logBBsize)
  85                return false;
  86        if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
  87                return false;
  88        return true;
  89}
  90
  91/*
  92 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
  93 * a range of nbblks basic blocks at any valid offset within the log.
  94 */
  95static char *
  96xlog_alloc_buffer(
  97        struct xlog     *log,
  98        int             nbblks)
  99{
 100        /*
 101         * Pass log block 0 since we don't have an addr yet, buffer will be
 102         * verified on read.
 103         */
 104        if (!xlog_verify_bno(log, 0, nbblks)) {
 105                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
 106                        nbblks);
 107                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
 108                return NULL;
 109        }
 110
 111        /*
 112         * We do log I/O in units of log sectors (a power-of-2 multiple of the
 113         * basic block size), so we round up the requested size to accommodate
 114         * the basic blocks required for complete log sectors.
 115         *
 116         * In addition, the buffer may be used for a non-sector-aligned block
 117         * offset, in which case an I/O of the requested size could extend
 118         * beyond the end of the buffer.  If the requested size is only 1 basic
 119         * block it will never straddle a sector boundary, so this won't be an
 120         * issue.  Nor will this be a problem if the log I/O is done in basic
 121         * blocks (sector size 1).  But otherwise we extend the buffer by one
 122         * extra log sector to ensure there's space to accommodate this
 123         * possibility.
 124         */
 125        if (nbblks > 1 && log->l_sectBBsize > 1)
 126                nbblks += log->l_sectBBsize;
 127        nbblks = round_up(nbblks, log->l_sectBBsize);
 128        return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL);
 129}
 130
 131/*
 132 * Return the address of the start of the given block number's data
 133 * in a log buffer.  The buffer covers a log sector-aligned region.
 134 */
 135static inline unsigned int
 136xlog_align(
 137        struct xlog     *log,
 138        xfs_daddr_t     blk_no)
 139{
 140        return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
 141}
 142
 143static int
 144xlog_do_io(
 145        struct xlog             *log,
 146        xfs_daddr_t             blk_no,
 147        unsigned int            nbblks,
 148        char                    *data,
 149        unsigned int            op)
 150{
 151        int                     error;
 152
 153        if (!xlog_verify_bno(log, blk_no, nbblks)) {
 154                xfs_warn(log->l_mp,
 155                         "Invalid log block/length (0x%llx, 0x%x) for buffer",
 156                         blk_no, nbblks);
 157                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
 158                return -EFSCORRUPTED;
 159        }
 160
 161        blk_no = round_down(blk_no, log->l_sectBBsize);
 162        nbblks = round_up(nbblks, log->l_sectBBsize);
 163        ASSERT(nbblks > 0);
 164
 165        error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
 166                        BBTOB(nbblks), data, op);
 167        if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
 168                xfs_alert(log->l_mp,
 169                          "log recovery %s I/O error at daddr 0x%llx len %d error %d",
 170                          op == REQ_OP_WRITE ? "write" : "read",
 171                          blk_no, nbblks, error);
 172        }
 173        return error;
 174}
 175
 176STATIC int
 177xlog_bread_noalign(
 178        struct xlog     *log,
 179        xfs_daddr_t     blk_no,
 180        int             nbblks,
 181        char            *data)
 182{
 183        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
 184}
 185
 186STATIC int
 187xlog_bread(
 188        struct xlog     *log,
 189        xfs_daddr_t     blk_no,
 190        int             nbblks,
 191        char            *data,
 192        char            **offset)
 193{
 194        int             error;
 195
 196        error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
 197        if (!error)
 198                *offset = data + xlog_align(log, blk_no);
 199        return error;
 200}
 201
 202STATIC int
 203xlog_bwrite(
 204        struct xlog     *log,
 205        xfs_daddr_t     blk_no,
 206        int             nbblks,
 207        char            *data)
 208{
 209        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
 210}
 211
 212#ifdef DEBUG
 213/*
 214 * dump debug superblock and log record information
 215 */
 216STATIC void
 217xlog_header_check_dump(
 218        xfs_mount_t             *mp,
 219        xlog_rec_header_t       *head)
 220{
 221        xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
 222                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
 223        xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
 224                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
 225}
 226#else
 227#define xlog_header_check_dump(mp, head)
 228#endif
 229
 230/*
 231 * check log record header for recovery
 232 */
 233STATIC int
 234xlog_header_check_recover(
 235        xfs_mount_t             *mp,
 236        xlog_rec_header_t       *head)
 237{
 238        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 239
 240        /*
 241         * IRIX doesn't write the h_fmt field and leaves it zeroed
 242         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
 243         * a dirty log created in IRIX.
 244         */
 245        if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
 246                xfs_warn(mp,
 247        "dirty log written in incompatible format - can't recover");
 248                xlog_header_check_dump(mp, head);
 249                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
 250                                 XFS_ERRLEVEL_HIGH, mp);
 251                return -EFSCORRUPTED;
 252        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
 253                xfs_warn(mp,
 254        "dirty log entry has mismatched uuid - can't recover");
 255                xlog_header_check_dump(mp, head);
 256                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
 257                                 XFS_ERRLEVEL_HIGH, mp);
 258                return -EFSCORRUPTED;
 259        }
 260        return 0;
 261}
 262
 263/*
 264 * read the head block of the log and check the header
 265 */
 266STATIC int
 267xlog_header_check_mount(
 268        xfs_mount_t             *mp,
 269        xlog_rec_header_t       *head)
 270{
 271        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 272
 273        if (uuid_is_null(&head->h_fs_uuid)) {
 274                /*
 275                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
 276                 * h_fs_uuid is null, we assume this log was last mounted
 277                 * by IRIX and continue.
 278                 */
 279                xfs_warn(mp, "null uuid in log - IRIX style log");
 280        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
 281                xfs_warn(mp, "log has mismatched uuid - can't recover");
 282                xlog_header_check_dump(mp, head);
 283                XFS_ERROR_REPORT("xlog_header_check_mount",
 284                                 XFS_ERRLEVEL_HIGH, mp);
 285                return -EFSCORRUPTED;
 286        }
 287        return 0;
 288}
 289
 290STATIC void
 291xlog_recover_iodone(
 292        struct xfs_buf  *bp)
 293{
 294        if (bp->b_error) {
 295                /*
 296                 * We're not going to bother about retrying
 297                 * this during recovery. One strike!
 298                 */
 299                if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
 300                        xfs_buf_ioerror_alert(bp, __func__);
 301                        xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
 302                }
 303        }
 304
 305        /*
 306         * On v5 supers, a bli could be attached to update the metadata LSN.
 307         * Clean it up.
 308         */
 309        if (bp->b_log_item)
 310                xfs_buf_item_relse(bp);
 311        ASSERT(bp->b_log_item == NULL);
 312
 313        bp->b_iodone = NULL;
 314        xfs_buf_ioend(bp);
 315}
 316
 317/*
 318 * This routine finds (to an approximation) the first block in the physical
 319 * log which contains the given cycle.  It uses a binary search algorithm.
 320 * Note that the algorithm can not be perfect because the disk will not
 321 * necessarily be perfect.
 322 */
 323STATIC int
 324xlog_find_cycle_start(
 325        struct xlog     *log,
 326        char            *buffer,
 327        xfs_daddr_t     first_blk,
 328        xfs_daddr_t     *last_blk,
 329        uint            cycle)
 330{
 331        char            *offset;
 332        xfs_daddr_t     mid_blk;
 333        xfs_daddr_t     end_blk;
 334        uint            mid_cycle;
 335        int             error;
 336
 337        end_blk = *last_blk;
 338        mid_blk = BLK_AVG(first_blk, end_blk);
 339        while (mid_blk != first_blk && mid_blk != end_blk) {
 340                error = xlog_bread(log, mid_blk, 1, buffer, &offset);
 341                if (error)
 342                        return error;
 343                mid_cycle = xlog_get_cycle(offset);
 344                if (mid_cycle == cycle)
 345                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
 346                else
 347                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
 348                mid_blk = BLK_AVG(first_blk, end_blk);
 349        }
 350        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
 351               (mid_blk == end_blk && mid_blk-1 == first_blk));
 352
 353        *last_blk = end_blk;
 354
 355        return 0;
 356}
 357
 358/*
 359 * Check that a range of blocks does not contain stop_on_cycle_no.
 360 * Fill in *new_blk with the block offset where such a block is
 361 * found, or with -1 (an invalid block number) if there is no such
 362 * block in the range.  The scan needs to occur from front to back
 363 * and the pointer into the region must be updated since a later
 364 * routine will need to perform another test.
 365 */
 366STATIC int
 367xlog_find_verify_cycle(
 368        struct xlog     *log,
 369        xfs_daddr_t     start_blk,
 370        int             nbblks,
 371        uint            stop_on_cycle_no,
 372        xfs_daddr_t     *new_blk)
 373{
 374        xfs_daddr_t     i, j;
 375        uint            cycle;
 376        char            *buffer;
 377        xfs_daddr_t     bufblks;
 378        char            *buf = NULL;
 379        int             error = 0;
 380
 381        /*
 382         * Greedily allocate a buffer big enough to handle the full
 383         * range of basic blocks we'll be examining.  If that fails,
 384         * try a smaller size.  We need to be able to read at least
 385         * a log sector, or we're out of luck.
 386         */
 387        bufblks = 1 << ffs(nbblks);
 388        while (bufblks > log->l_logBBsize)
 389                bufblks >>= 1;
 390        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
 391                bufblks >>= 1;
 392                if (bufblks < log->l_sectBBsize)
 393                        return -ENOMEM;
 394        }
 395
 396        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
 397                int     bcount;
 398
 399                bcount = min(bufblks, (start_blk + nbblks - i));
 400
 401                error = xlog_bread(log, i, bcount, buffer, &buf);
 402                if (error)
 403                        goto out;
 404
 405                for (j = 0; j < bcount; j++) {
 406                        cycle = xlog_get_cycle(buf);
 407                        if (cycle == stop_on_cycle_no) {
 408                                *new_blk = i+j;
 409                                goto out;
 410                        }
 411
 412                        buf += BBSIZE;
 413                }
 414        }
 415
 416        *new_blk = -1;
 417
 418out:
 419        kmem_free(buffer);
 420        return error;
 421}
 422
 423/*
 424 * Potentially backup over partial log record write.
 425 *
 426 * In the typical case, last_blk is the number of the block directly after
 427 * a good log record.  Therefore, we subtract one to get the block number
 428 * of the last block in the given buffer.  extra_bblks contains the number
 429 * of blocks we would have read on a previous read.  This happens when the
 430 * last log record is split over the end of the physical log.
 431 *
 432 * extra_bblks is the number of blocks potentially verified on a previous
 433 * call to this routine.
 434 */
 435STATIC int
 436xlog_find_verify_log_record(
 437        struct xlog             *log,
 438        xfs_daddr_t             start_blk,
 439        xfs_daddr_t             *last_blk,
 440        int                     extra_bblks)
 441{
 442        xfs_daddr_t             i;
 443        char                    *buffer;
 444        char                    *offset = NULL;
 445        xlog_rec_header_t       *head = NULL;
 446        int                     error = 0;
 447        int                     smallmem = 0;
 448        int                     num_blks = *last_blk - start_blk;
 449        int                     xhdrs;
 450
 451        ASSERT(start_blk != 0 || *last_blk != start_blk);
 452
 453        buffer = xlog_alloc_buffer(log, num_blks);
 454        if (!buffer) {
 455                buffer = xlog_alloc_buffer(log, 1);
 456                if (!buffer)
 457                        return -ENOMEM;
 458                smallmem = 1;
 459        } else {
 460                error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
 461                if (error)
 462                        goto out;
 463                offset += ((num_blks - 1) << BBSHIFT);
 464        }
 465
 466        for (i = (*last_blk) - 1; i >= 0; i--) {
 467                if (i < start_blk) {
 468                        /* valid log record not found */
 469                        xfs_warn(log->l_mp,
 470                "Log inconsistent (didn't find previous header)");
 471                        ASSERT(0);
 472                        error = -EIO;
 473                        goto out;
 474                }
 475
 476                if (smallmem) {
 477                        error = xlog_bread(log, i, 1, buffer, &offset);
 478                        if (error)
 479                                goto out;
 480                }
 481
 482                head = (xlog_rec_header_t *)offset;
 483
 484                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
 485                        break;
 486
 487                if (!smallmem)
 488                        offset -= BBSIZE;
 489        }
 490
 491        /*
 492         * We hit the beginning of the physical log & still no header.  Return
 493         * to caller.  If caller can handle a return of -1, then this routine
 494         * will be called again for the end of the physical log.
 495         */
 496        if (i == -1) {
 497                error = 1;
 498                goto out;
 499        }
 500
 501        /*
 502         * We have the final block of the good log (the first block
 503         * of the log record _before_ the head. So we check the uuid.
 504         */
 505        if ((error = xlog_header_check_mount(log->l_mp, head)))
 506                goto out;
 507
 508        /*
 509         * We may have found a log record header before we expected one.
 510         * last_blk will be the 1st block # with a given cycle #.  We may end
 511         * up reading an entire log record.  In this case, we don't want to
 512         * reset last_blk.  Only when last_blk points in the middle of a log
 513         * record do we update last_blk.
 514         */
 515        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
 516                uint    h_size = be32_to_cpu(head->h_size);
 517
 518                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
 519                if (h_size % XLOG_HEADER_CYCLE_SIZE)
 520                        xhdrs++;
 521        } else {
 522                xhdrs = 1;
 523        }
 524
 525        if (*last_blk - i + extra_bblks !=
 526            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
 527                *last_blk = i;
 528
 529out:
 530        kmem_free(buffer);
 531        return error;
 532}
 533
 534/*
 535 * Head is defined to be the point of the log where the next log write
 536 * could go.  This means that incomplete LR writes at the end are
 537 * eliminated when calculating the head.  We aren't guaranteed that previous
 538 * LR have complete transactions.  We only know that a cycle number of
 539 * current cycle number -1 won't be present in the log if we start writing
 540 * from our current block number.
 541 *
 542 * last_blk contains the block number of the first block with a given
 543 * cycle number.
 544 *
 545 * Return: zero if normal, non-zero if error.
 546 */
 547STATIC int
 548xlog_find_head(
 549        struct xlog     *log,
 550        xfs_daddr_t     *return_head_blk)
 551{
 552        char            *buffer;
 553        char            *offset;
 554        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
 555        int             num_scan_bblks;
 556        uint            first_half_cycle, last_half_cycle;
 557        uint            stop_on_cycle;
 558        int             error, log_bbnum = log->l_logBBsize;
 559
 560        /* Is the end of the log device zeroed? */
 561        error = xlog_find_zeroed(log, &first_blk);
 562        if (error < 0) {
 563                xfs_warn(log->l_mp, "empty log check failed");
 564                return error;
 565        }
 566        if (error == 1) {
 567                *return_head_blk = first_blk;
 568
 569                /* Is the whole lot zeroed? */
 570                if (!first_blk) {
 571                        /* Linux XFS shouldn't generate totally zeroed logs -
 572                         * mkfs etc write a dummy unmount record to a fresh
 573                         * log so we can store the uuid in there
 574                         */
 575                        xfs_warn(log->l_mp, "totally zeroed log");
 576                }
 577
 578                return 0;
 579        }
 580
 581        first_blk = 0;                  /* get cycle # of 1st block */
 582        buffer = xlog_alloc_buffer(log, 1);
 583        if (!buffer)
 584                return -ENOMEM;
 585
 586        error = xlog_bread(log, 0, 1, buffer, &offset);
 587        if (error)
 588                goto out_free_buffer;
 589
 590        first_half_cycle = xlog_get_cycle(offset);
 591
 592        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
 593        error = xlog_bread(log, last_blk, 1, buffer, &offset);
 594        if (error)
 595                goto out_free_buffer;
 596
 597        last_half_cycle = xlog_get_cycle(offset);
 598        ASSERT(last_half_cycle != 0);
 599
 600        /*
 601         * If the 1st half cycle number is equal to the last half cycle number,
 602         * then the entire log is stamped with the same cycle number.  In this
 603         * case, head_blk can't be set to zero (which makes sense).  The below
 604         * math doesn't work out properly with head_blk equal to zero.  Instead,
 605         * we set it to log_bbnum which is an invalid block number, but this
 606         * value makes the math correct.  If head_blk doesn't changed through
 607         * all the tests below, *head_blk is set to zero at the very end rather
 608         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
 609         * in a circular file.
 610         */
 611        if (first_half_cycle == last_half_cycle) {
 612                /*
 613                 * In this case we believe that the entire log should have
 614                 * cycle number last_half_cycle.  We need to scan backwards
 615                 * from the end verifying that there are no holes still
 616                 * containing last_half_cycle - 1.  If we find such a hole,
 617                 * then the start of that hole will be the new head.  The
 618                 * simple case looks like
 619                 *        x | x ... | x - 1 | x
 620                 * Another case that fits this picture would be
 621                 *        x | x + 1 | x ... | x
 622                 * In this case the head really is somewhere at the end of the
 623                 * log, as one of the latest writes at the beginning was
 624                 * incomplete.
 625                 * One more case is
 626                 *        x | x + 1 | x ... | x - 1 | x
 627                 * This is really the combination of the above two cases, and
 628                 * the head has to end up at the start of the x-1 hole at the
 629                 * end of the log.
 630                 *
 631                 * In the 256k log case, we will read from the beginning to the
 632                 * end of the log and search for cycle numbers equal to x-1.
 633                 * We don't worry about the x+1 blocks that we encounter,
 634                 * because we know that they cannot be the head since the log
 635                 * started with x.
 636                 */
 637                head_blk = log_bbnum;
 638                stop_on_cycle = last_half_cycle - 1;
 639        } else {
 640                /*
 641                 * In this case we want to find the first block with cycle
 642                 * number matching last_half_cycle.  We expect the log to be
 643                 * some variation on
 644                 *        x + 1 ... | x ... | x
 645                 * The first block with cycle number x (last_half_cycle) will
 646                 * be where the new head belongs.  First we do a binary search
 647                 * for the first occurrence of last_half_cycle.  The binary
 648                 * search may not be totally accurate, so then we scan back
 649                 * from there looking for occurrences of last_half_cycle before
 650                 * us.  If that backwards scan wraps around the beginning of
 651                 * the log, then we look for occurrences of last_half_cycle - 1
 652                 * at the end of the log.  The cases we're looking for look
 653                 * like
 654                 *                               v binary search stopped here
 655                 *        x + 1 ... | x | x + 1 | x ... | x
 656                 *                   ^ but we want to locate this spot
 657                 * or
 658                 *        <---------> less than scan distance
 659                 *        x + 1 ... | x ... | x - 1 | x
 660                 *                           ^ we want to locate this spot
 661                 */
 662                stop_on_cycle = last_half_cycle;
 663                error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
 664                                last_half_cycle);
 665                if (error)
 666                        goto out_free_buffer;
 667        }
 668
 669        /*
 670         * Now validate the answer.  Scan back some number of maximum possible
 671         * blocks and make sure each one has the expected cycle number.  The
 672         * maximum is determined by the total possible amount of buffering
 673         * in the in-core log.  The following number can be made tighter if
 674         * we actually look at the block size of the filesystem.
 675         */
 676        num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
 677        if (head_blk >= num_scan_bblks) {
 678                /*
 679                 * We are guaranteed that the entire check can be performed
 680                 * in one buffer.
 681                 */
 682                start_blk = head_blk - num_scan_bblks;
 683                if ((error = xlog_find_verify_cycle(log,
 684                                                start_blk, num_scan_bblks,
 685                                                stop_on_cycle, &new_blk)))
 686                        goto out_free_buffer;
 687                if (new_blk != -1)
 688                        head_blk = new_blk;
 689        } else {                /* need to read 2 parts of log */
 690                /*
 691                 * We are going to scan backwards in the log in two parts.
 692                 * First we scan the physical end of the log.  In this part
 693                 * of the log, we are looking for blocks with cycle number
 694                 * last_half_cycle - 1.
 695                 * If we find one, then we know that the log starts there, as
 696                 * we've found a hole that didn't get written in going around
 697                 * the end of the physical log.  The simple case for this is
 698                 *        x + 1 ... | x ... | x - 1 | x
 699                 *        <---------> less than scan distance
 700                 * If all of the blocks at the end of the log have cycle number
 701                 * last_half_cycle, then we check the blocks at the start of
 702                 * the log looking for occurrences of last_half_cycle.  If we
 703                 * find one, then our current estimate for the location of the
 704                 * first occurrence of last_half_cycle is wrong and we move
 705                 * back to the hole we've found.  This case looks like
 706                 *        x + 1 ... | x | x + 1 | x ...
 707                 *                               ^ binary search stopped here
 708                 * Another case we need to handle that only occurs in 256k
 709                 * logs is
 710                 *        x + 1 ... | x ... | x+1 | x ...
 711                 *                   ^ binary search stops here
 712                 * In a 256k log, the scan at the end of the log will see the
 713                 * x + 1 blocks.  We need to skip past those since that is
 714                 * certainly not the head of the log.  By searching for
 715                 * last_half_cycle-1 we accomplish that.
 716                 */
 717                ASSERT(head_blk <= INT_MAX &&
 718                        (xfs_daddr_t) num_scan_bblks >= head_blk);
 719                start_blk = log_bbnum - (num_scan_bblks - head_blk);
 720                if ((error = xlog_find_verify_cycle(log, start_blk,
 721                                        num_scan_bblks - (int)head_blk,
 722                                        (stop_on_cycle - 1), &new_blk)))
 723                        goto out_free_buffer;
 724                if (new_blk != -1) {
 725                        head_blk = new_blk;
 726                        goto validate_head;
 727                }
 728
 729                /*
 730                 * Scan beginning of log now.  The last part of the physical
 731                 * log is good.  This scan needs to verify that it doesn't find
 732                 * the last_half_cycle.
 733                 */
 734                start_blk = 0;
 735                ASSERT(head_blk <= INT_MAX);
 736                if ((error = xlog_find_verify_cycle(log,
 737                                        start_blk, (int)head_blk,
 738                                        stop_on_cycle, &new_blk)))
 739                        goto out_free_buffer;
 740                if (new_blk != -1)
 741                        head_blk = new_blk;
 742        }
 743
 744validate_head:
 745        /*
 746         * Now we need to make sure head_blk is not pointing to a block in
 747         * the middle of a log record.
 748         */
 749        num_scan_bblks = XLOG_REC_SHIFT(log);
 750        if (head_blk >= num_scan_bblks) {
 751                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
 752
 753                /* start ptr at last block ptr before head_blk */
 754                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
 755                if (error == 1)
 756                        error = -EIO;
 757                if (error)
 758                        goto out_free_buffer;
 759        } else {
 760                start_blk = 0;
 761                ASSERT(head_blk <= INT_MAX);
 762                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
 763                if (error < 0)
 764                        goto out_free_buffer;
 765                if (error == 1) {
 766                        /* We hit the beginning of the log during our search */
 767                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
 768                        new_blk = log_bbnum;
 769                        ASSERT(start_blk <= INT_MAX &&
 770                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
 771                        ASSERT(head_blk <= INT_MAX);
 772                        error = xlog_find_verify_log_record(log, start_blk,
 773                                                        &new_blk, (int)head_blk);
 774                        if (error == 1)
 775                                error = -EIO;
 776                        if (error)
 777                                goto out_free_buffer;
 778                        if (new_blk != log_bbnum)
 779                                head_blk = new_blk;
 780                } else if (error)
 781                        goto out_free_buffer;
 782        }
 783
 784        kmem_free(buffer);
 785        if (head_blk == log_bbnum)
 786                *return_head_blk = 0;
 787        else
 788                *return_head_blk = head_blk;
 789        /*
 790         * When returning here, we have a good block number.  Bad block
 791         * means that during a previous crash, we didn't have a clean break
 792         * from cycle number N to cycle number N-1.  In this case, we need
 793         * to find the first block with cycle number N-1.
 794         */
 795        return 0;
 796
 797out_free_buffer:
 798        kmem_free(buffer);
 799        if (error)
 800                xfs_warn(log->l_mp, "failed to find log head");
 801        return error;
 802}
 803
 804/*
 805 * Seek backwards in the log for log record headers.
 806 *
 807 * Given a starting log block, walk backwards until we find the provided number
 808 * of records or hit the provided tail block. The return value is the number of
 809 * records encountered or a negative error code. The log block and buffer
 810 * pointer of the last record seen are returned in rblk and rhead respectively.
 811 */
 812STATIC int
 813xlog_rseek_logrec_hdr(
 814        struct xlog             *log,
 815        xfs_daddr_t             head_blk,
 816        xfs_daddr_t             tail_blk,
 817        int                     count,
 818        char                    *buffer,
 819        xfs_daddr_t             *rblk,
 820        struct xlog_rec_header  **rhead,
 821        bool                    *wrapped)
 822{
 823        int                     i;
 824        int                     error;
 825        int                     found = 0;
 826        char                    *offset = NULL;
 827        xfs_daddr_t             end_blk;
 828
 829        *wrapped = false;
 830
 831        /*
 832         * Walk backwards from the head block until we hit the tail or the first
 833         * block in the log.
 834         */
 835        end_blk = head_blk > tail_blk ? tail_blk : 0;
 836        for (i = (int) head_blk - 1; i >= end_blk; i--) {
 837                error = xlog_bread(log, i, 1, buffer, &offset);
 838                if (error)
 839                        goto out_error;
 840
 841                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 842                        *rblk = i;
 843                        *rhead = (struct xlog_rec_header *) offset;
 844                        if (++found == count)
 845                                break;
 846                }
 847        }
 848
 849        /*
 850         * If we haven't hit the tail block or the log record header count,
 851         * start looking again from the end of the physical log. Note that
 852         * callers can pass head == tail if the tail is not yet known.
 853         */
 854        if (tail_blk >= head_blk && found != count) {
 855                for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
 856                        error = xlog_bread(log, i, 1, buffer, &offset);
 857                        if (error)
 858                                goto out_error;
 859
 860                        if (*(__be32 *)offset ==
 861                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 862                                *wrapped = true;
 863                                *rblk = i;
 864                                *rhead = (struct xlog_rec_header *) offset;
 865                                if (++found == count)
 866                                        break;
 867                        }
 868                }
 869        }
 870
 871        return found;
 872
 873out_error:
 874        return error;
 875}
 876
 877/*
 878 * Seek forward in the log for log record headers.
 879 *
 880 * Given head and tail blocks, walk forward from the tail block until we find
 881 * the provided number of records or hit the head block. The return value is the
 882 * number of records encountered or a negative error code. The log block and
 883 * buffer pointer of the last record seen are returned in rblk and rhead
 884 * respectively.
 885 */
 886STATIC int
 887xlog_seek_logrec_hdr(
 888        struct xlog             *log,
 889        xfs_daddr_t             head_blk,
 890        xfs_daddr_t             tail_blk,
 891        int                     count,
 892        char                    *buffer,
 893        xfs_daddr_t             *rblk,
 894        struct xlog_rec_header  **rhead,
 895        bool                    *wrapped)
 896{
 897        int                     i;
 898        int                     error;
 899        int                     found = 0;
 900        char                    *offset = NULL;
 901        xfs_daddr_t             end_blk;
 902
 903        *wrapped = false;
 904
 905        /*
 906         * Walk forward from the tail block until we hit the head or the last
 907         * block in the log.
 908         */
 909        end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
 910        for (i = (int) tail_blk; i <= end_blk; i++) {
 911                error = xlog_bread(log, i, 1, buffer, &offset);
 912                if (error)
 913                        goto out_error;
 914
 915                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 916                        *rblk = i;
 917                        *rhead = (struct xlog_rec_header *) offset;
 918                        if (++found == count)
 919                                break;
 920                }
 921        }
 922
 923        /*
 924         * If we haven't hit the head block or the log record header count,
 925         * start looking again from the start of the physical log.
 926         */
 927        if (tail_blk > head_blk && found != count) {
 928                for (i = 0; i < (int) head_blk; i++) {
 929                        error = xlog_bread(log, i, 1, buffer, &offset);
 930                        if (error)
 931                                goto out_error;
 932
 933                        if (*(__be32 *)offset ==
 934                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
 935                                *wrapped = true;
 936                                *rblk = i;
 937                                *rhead = (struct xlog_rec_header *) offset;
 938                                if (++found == count)
 939                                        break;
 940                        }
 941                }
 942        }
 943
 944        return found;
 945
 946out_error:
 947        return error;
 948}
 949
 950/*
 951 * Calculate distance from head to tail (i.e., unused space in the log).
 952 */
 953static inline int
 954xlog_tail_distance(
 955        struct xlog     *log,
 956        xfs_daddr_t     head_blk,
 957        xfs_daddr_t     tail_blk)
 958{
 959        if (head_blk < tail_blk)
 960                return tail_blk - head_blk;
 961
 962        return tail_blk + (log->l_logBBsize - head_blk);
 963}
 964
 965/*
 966 * Verify the log tail. This is particularly important when torn or incomplete
 967 * writes have been detected near the front of the log and the head has been
 968 * walked back accordingly.
 969 *
 970 * We also have to handle the case where the tail was pinned and the head
 971 * blocked behind the tail right before a crash. If the tail had been pushed
 972 * immediately prior to the crash and the subsequent checkpoint was only
 973 * partially written, it's possible it overwrote the last referenced tail in the
 974 * log with garbage. This is not a coherency problem because the tail must have
 975 * been pushed before it can be overwritten, but appears as log corruption to
 976 * recovery because we have no way to know the tail was updated if the
 977 * subsequent checkpoint didn't write successfully.
 978 *
 979 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 980 * offending record is within max iclog bufs from the head, walk the tail
 981 * forward and retry until a valid tail is found or corruption is detected out
 982 * of the range of a possible overwrite.
 983 */
 984STATIC int
 985xlog_verify_tail(
 986        struct xlog             *log,
 987        xfs_daddr_t             head_blk,
 988        xfs_daddr_t             *tail_blk,
 989        int                     hsize)
 990{
 991        struct xlog_rec_header  *thead;
 992        char                    *buffer;
 993        xfs_daddr_t             first_bad;
 994        int                     error = 0;
 995        bool                    wrapped;
 996        xfs_daddr_t             tmp_tail;
 997        xfs_daddr_t             orig_tail = *tail_blk;
 998
 999        buffer = xlog_alloc_buffer(log, 1);
1000        if (!buffer)
1001                return -ENOMEM;
1002
1003        /*
1004         * Make sure the tail points to a record (returns positive count on
1005         * success).
1006         */
1007        error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
1008                        &tmp_tail, &thead, &wrapped);
1009        if (error < 0)
1010                goto out;
1011        if (*tail_blk != tmp_tail)
1012                *tail_blk = tmp_tail;
1013
1014        /*
1015         * Run a CRC check from the tail to the head. We can't just check
1016         * MAX_ICLOGS records past the tail because the tail may point to stale
1017         * blocks cleared during the search for the head/tail. These blocks are
1018         * overwritten with zero-length records and thus record count is not a
1019         * reliable indicator of the iclog state before a crash.
1020         */
1021        first_bad = 0;
1022        error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1023                                      XLOG_RECOVER_CRCPASS, &first_bad);
1024        while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1025                int     tail_distance;
1026
1027                /*
1028                 * Is corruption within range of the head? If so, retry from
1029                 * the next record. Otherwise return an error.
1030                 */
1031                tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1032                if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1033                        break;
1034
1035                /* skip to the next record; returns positive count on success */
1036                error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
1037                                buffer, &tmp_tail, &thead, &wrapped);
1038                if (error < 0)
1039                        goto out;
1040
1041                *tail_blk = tmp_tail;
1042                first_bad = 0;
1043                error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1044                                              XLOG_RECOVER_CRCPASS, &first_bad);
1045        }
1046
1047        if (!error && *tail_blk != orig_tail)
1048                xfs_warn(log->l_mp,
1049                "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1050                         orig_tail, *tail_blk);
1051out:
1052        kmem_free(buffer);
1053        return error;
1054}
1055
1056/*
1057 * Detect and trim torn writes from the head of the log.
1058 *
1059 * Storage without sector atomicity guarantees can result in torn writes in the
1060 * log in the event of a crash. Our only means to detect this scenario is via
1061 * CRC verification. While we can't always be certain that CRC verification
1062 * failure is due to a torn write vs. an unrelated corruption, we do know that
1063 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1064 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1065 * the log and treat failures in this range as torn writes as a matter of
1066 * policy. In the event of CRC failure, the head is walked back to the last good
1067 * record in the log and the tail is updated from that record and verified.
1068 */
1069STATIC int
1070xlog_verify_head(
1071        struct xlog             *log,
1072        xfs_daddr_t             *head_blk,      /* in/out: unverified head */
1073        xfs_daddr_t             *tail_blk,      /* out: tail block */
1074        char                    *buffer,
1075        xfs_daddr_t             *rhead_blk,     /* start blk of last record */
1076        struct xlog_rec_header  **rhead,        /* ptr to last record */
1077        bool                    *wrapped)       /* last rec. wraps phys. log */
1078{
1079        struct xlog_rec_header  *tmp_rhead;
1080        char                    *tmp_buffer;
1081        xfs_daddr_t             first_bad;
1082        xfs_daddr_t             tmp_rhead_blk;
1083        int                     found;
1084        int                     error;
1085        bool                    tmp_wrapped;
1086
1087        /*
1088         * Check the head of the log for torn writes. Search backwards from the
1089         * head until we hit the tail or the maximum number of log record I/Os
1090         * that could have been in flight at one time. Use a temporary buffer so
1091         * we don't trash the rhead/buffer pointers from the caller.
1092         */
1093        tmp_buffer = xlog_alloc_buffer(log, 1);
1094        if (!tmp_buffer)
1095                return -ENOMEM;
1096        error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1097                                      XLOG_MAX_ICLOGS, tmp_buffer,
1098                                      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1099        kmem_free(tmp_buffer);
1100        if (error < 0)
1101                return error;
1102
1103        /*
1104         * Now run a CRC verification pass over the records starting at the
1105         * block found above to the current head. If a CRC failure occurs, the
1106         * log block of the first bad record is saved in first_bad.
1107         */
1108        error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1109                                      XLOG_RECOVER_CRCPASS, &first_bad);
1110        if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1111                /*
1112                 * We've hit a potential torn write. Reset the error and warn
1113                 * about it.
1114                 */
1115                error = 0;
1116                xfs_warn(log->l_mp,
1117"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1118                         first_bad, *head_blk);
1119
1120                /*
1121                 * Get the header block and buffer pointer for the last good
1122                 * record before the bad record.
1123                 *
1124                 * Note that xlog_find_tail() clears the blocks at the new head
1125                 * (i.e., the records with invalid CRC) if the cycle number
1126                 * matches the the current cycle.
1127                 */
1128                found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1129                                buffer, rhead_blk, rhead, wrapped);
1130                if (found < 0)
1131                        return found;
1132                if (found == 0)         /* XXX: right thing to do here? */
1133                        return -EIO;
1134
1135                /*
1136                 * Reset the head block to the starting block of the first bad
1137                 * log record and set the tail block based on the last good
1138                 * record.
1139                 *
1140                 * Bail out if the updated head/tail match as this indicates
1141                 * possible corruption outside of the acceptable
1142                 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1143                 */
1144                *head_blk = first_bad;
1145                *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1146                if (*head_blk == *tail_blk) {
1147                        ASSERT(0);
1148                        return 0;
1149                }
1150        }
1151        if (error)
1152                return error;
1153
1154        return xlog_verify_tail(log, *head_blk, tail_blk,
1155                                be32_to_cpu((*rhead)->h_size));
1156}
1157
1158/*
1159 * We need to make sure we handle log wrapping properly, so we can't use the
1160 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1161 * log.
1162 *
1163 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1164 * operation here and cast it back to a 64 bit daddr on return.
1165 */
1166static inline xfs_daddr_t
1167xlog_wrap_logbno(
1168        struct xlog             *log,
1169        xfs_daddr_t             bno)
1170{
1171        int                     mod;
1172
1173        div_s64_rem(bno, log->l_logBBsize, &mod);
1174        return mod;
1175}
1176
1177/*
1178 * Check whether the head of the log points to an unmount record. In other
1179 * words, determine whether the log is clean. If so, update the in-core state
1180 * appropriately.
1181 */
1182static int
1183xlog_check_unmount_rec(
1184        struct xlog             *log,
1185        xfs_daddr_t             *head_blk,
1186        xfs_daddr_t             *tail_blk,
1187        struct xlog_rec_header  *rhead,
1188        xfs_daddr_t             rhead_blk,
1189        char                    *buffer,
1190        bool                    *clean)
1191{
1192        struct xlog_op_header   *op_head;
1193        xfs_daddr_t             umount_data_blk;
1194        xfs_daddr_t             after_umount_blk;
1195        int                     hblks;
1196        int                     error;
1197        char                    *offset;
1198
1199        *clean = false;
1200
1201        /*
1202         * Look for unmount record. If we find it, then we know there was a
1203         * clean unmount. Since 'i' could be the last block in the physical
1204         * log, we convert to a log block before comparing to the head_blk.
1205         *
1206         * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1207         * below. We won't want to clear the unmount record if there is one, so
1208         * we pass the lsn of the unmount record rather than the block after it.
1209         */
1210        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1211                int     h_size = be32_to_cpu(rhead->h_size);
1212                int     h_version = be32_to_cpu(rhead->h_version);
1213
1214                if ((h_version & XLOG_VERSION_2) &&
1215                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1216                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1217                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
1218                                hblks++;
1219                } else {
1220                        hblks = 1;
1221                }
1222        } else {
1223                hblks = 1;
1224        }
1225
1226        after_umount_blk = xlog_wrap_logbno(log,
1227                        rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1228
1229        if (*head_blk == after_umount_blk &&
1230            be32_to_cpu(rhead->h_num_logops) == 1) {
1231                umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1232                error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1233                if (error)
1234                        return error;
1235
1236                op_head = (struct xlog_op_header *)offset;
1237                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1238                        /*
1239                         * Set tail and last sync so that newly written log
1240                         * records will point recovery to after the current
1241                         * unmount record.
1242                         */
1243                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
1244                                        log->l_curr_cycle, after_umount_blk);
1245                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1246                                        log->l_curr_cycle, after_umount_blk);
1247                        *tail_blk = after_umount_blk;
1248
1249                        *clean = true;
1250                }
1251        }
1252
1253        return 0;
1254}
1255
1256static void
1257xlog_set_state(
1258        struct xlog             *log,
1259        xfs_daddr_t             head_blk,
1260        struct xlog_rec_header  *rhead,
1261        xfs_daddr_t             rhead_blk,
1262        bool                    bump_cycle)
1263{
1264        /*
1265         * Reset log values according to the state of the log when we
1266         * crashed.  In the case where head_blk == 0, we bump curr_cycle
1267         * one because the next write starts a new cycle rather than
1268         * continuing the cycle of the last good log record.  At this
1269         * point we have guaranteed that all partial log records have been
1270         * accounted for.  Therefore, we know that the last good log record
1271         * written was complete and ended exactly on the end boundary
1272         * of the physical log.
1273         */
1274        log->l_prev_block = rhead_blk;
1275        log->l_curr_block = (int)head_blk;
1276        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1277        if (bump_cycle)
1278                log->l_curr_cycle++;
1279        atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1280        atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1281        xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1282                                        BBTOB(log->l_curr_block));
1283        xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1284                                        BBTOB(log->l_curr_block));
1285}
1286
1287/*
1288 * Find the sync block number or the tail of the log.
1289 *
1290 * This will be the block number of the last record to have its
1291 * associated buffers synced to disk.  Every log record header has
1292 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1293 * to get a sync block number.  The only concern is to figure out which
1294 * log record header to believe.
1295 *
1296 * The following algorithm uses the log record header with the largest
1297 * lsn.  The entire log record does not need to be valid.  We only care
1298 * that the header is valid.
1299 *
1300 * We could speed up search by using current head_blk buffer, but it is not
1301 * available.
1302 */
1303STATIC int
1304xlog_find_tail(
1305        struct xlog             *log,
1306        xfs_daddr_t             *head_blk,
1307        xfs_daddr_t             *tail_blk)
1308{
1309        xlog_rec_header_t       *rhead;
1310        char                    *offset = NULL;
1311        char                    *buffer;
1312        int                     error;
1313        xfs_daddr_t             rhead_blk;
1314        xfs_lsn_t               tail_lsn;
1315        bool                    wrapped = false;
1316        bool                    clean = false;
1317
1318        /*
1319         * Find previous log record
1320         */
1321        if ((error = xlog_find_head(log, head_blk)))
1322                return error;
1323        ASSERT(*head_blk < INT_MAX);
1324
1325        buffer = xlog_alloc_buffer(log, 1);
1326        if (!buffer)
1327                return -ENOMEM;
1328        if (*head_blk == 0) {                           /* special case */
1329                error = xlog_bread(log, 0, 1, buffer, &offset);
1330                if (error)
1331                        goto done;
1332
1333                if (xlog_get_cycle(offset) == 0) {
1334                        *tail_blk = 0;
1335                        /* leave all other log inited values alone */
1336                        goto done;
1337                }
1338        }
1339
1340        /*
1341         * Search backwards through the log looking for the log record header
1342         * block. This wraps all the way back around to the head so something is
1343         * seriously wrong if we can't find it.
1344         */
1345        error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1346                                      &rhead_blk, &rhead, &wrapped);
1347        if (error < 0)
1348                return error;
1349        if (!error) {
1350                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1351                return -EIO;
1352        }
1353        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1354
1355        /*
1356         * Set the log state based on the current head record.
1357         */
1358        xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1359        tail_lsn = atomic64_read(&log->l_tail_lsn);
1360
1361        /*
1362         * Look for an unmount record at the head of the log. This sets the log
1363         * state to determine whether recovery is necessary.
1364         */
1365        error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1366                                       rhead_blk, buffer, &clean);
1367        if (error)
1368                goto done;
1369
1370        /*
1371         * Verify the log head if the log is not clean (e.g., we have anything
1372         * but an unmount record at the head). This uses CRC verification to
1373         * detect and trim torn writes. If discovered, CRC failures are
1374         * considered torn writes and the log head is trimmed accordingly.
1375         *
1376         * Note that we can only run CRC verification when the log is dirty
1377         * because there's no guarantee that the log data behind an unmount
1378         * record is compatible with the current architecture.
1379         */
1380        if (!clean) {
1381                xfs_daddr_t     orig_head = *head_blk;
1382
1383                error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1384                                         &rhead_blk, &rhead, &wrapped);
1385                if (error)
1386                        goto done;
1387
1388                /* update in-core state again if the head changed */
1389                if (*head_blk != orig_head) {
1390                        xlog_set_state(log, *head_blk, rhead, rhead_blk,
1391                                       wrapped);
1392                        tail_lsn = atomic64_read(&log->l_tail_lsn);
1393                        error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1394                                                       rhead, rhead_blk, buffer,
1395                                                       &clean);
1396                        if (error)
1397                                goto done;
1398                }
1399        }
1400
1401        /*
1402         * Note that the unmount was clean. If the unmount was not clean, we
1403         * need to know this to rebuild the superblock counters from the perag
1404         * headers if we have a filesystem using non-persistent counters.
1405         */
1406        if (clean)
1407                log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1408
1409        /*
1410         * Make sure that there are no blocks in front of the head
1411         * with the same cycle number as the head.  This can happen
1412         * because we allow multiple outstanding log writes concurrently,
1413         * and the later writes might make it out before earlier ones.
1414         *
1415         * We use the lsn from before modifying it so that we'll never
1416         * overwrite the unmount record after a clean unmount.
1417         *
1418         * Do this only if we are going to recover the filesystem
1419         *
1420         * NOTE: This used to say "if (!readonly)"
1421         * However on Linux, we can & do recover a read-only filesystem.
1422         * We only skip recovery if NORECOVERY is specified on mount,
1423         * in which case we would not be here.
1424         *
1425         * But... if the -device- itself is readonly, just skip this.
1426         * We can't recover this device anyway, so it won't matter.
1427         */
1428        if (!xfs_readonly_buftarg(log->l_targ))
1429                error = xlog_clear_stale_blocks(log, tail_lsn);
1430
1431done:
1432        kmem_free(buffer);
1433
1434        if (error)
1435                xfs_warn(log->l_mp, "failed to locate log tail");
1436        return error;
1437}
1438
1439/*
1440 * Is the log zeroed at all?
1441 *
1442 * The last binary search should be changed to perform an X block read
1443 * once X becomes small enough.  You can then search linearly through
1444 * the X blocks.  This will cut down on the number of reads we need to do.
1445 *
1446 * If the log is partially zeroed, this routine will pass back the blkno
1447 * of the first block with cycle number 0.  It won't have a complete LR
1448 * preceding it.
1449 *
1450 * Return:
1451 *      0  => the log is completely written to
1452 *      1 => use *blk_no as the first block of the log
1453 *      <0 => error has occurred
1454 */
1455STATIC int
1456xlog_find_zeroed(
1457        struct xlog     *log,
1458        xfs_daddr_t     *blk_no)
1459{
1460        char            *buffer;
1461        char            *offset;
1462        uint            first_cycle, last_cycle;
1463        xfs_daddr_t     new_blk, last_blk, start_blk;
1464        xfs_daddr_t     num_scan_bblks;
1465        int             error, log_bbnum = log->l_logBBsize;
1466
1467        *blk_no = 0;
1468
1469        /* check totally zeroed log */
1470        buffer = xlog_alloc_buffer(log, 1);
1471        if (!buffer)
1472                return -ENOMEM;
1473        error = xlog_bread(log, 0, 1, buffer, &offset);
1474        if (error)
1475                goto out_free_buffer;
1476
1477        first_cycle = xlog_get_cycle(offset);
1478        if (first_cycle == 0) {         /* completely zeroed log */
1479                *blk_no = 0;
1480                kmem_free(buffer);
1481                return 1;
1482        }
1483
1484        /* check partially zeroed log */
1485        error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1486        if (error)
1487                goto out_free_buffer;
1488
1489        last_cycle = xlog_get_cycle(offset);
1490        if (last_cycle != 0) {          /* log completely written to */
1491                kmem_free(buffer);
1492                return 0;
1493        }
1494
1495        /* we have a partially zeroed log */
1496        last_blk = log_bbnum-1;
1497        error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1498        if (error)
1499                goto out_free_buffer;
1500
1501        /*
1502         * Validate the answer.  Because there is no way to guarantee that
1503         * the entire log is made up of log records which are the same size,
1504         * we scan over the defined maximum blocks.  At this point, the maximum
1505         * is not chosen to mean anything special.   XXXmiken
1506         */
1507        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1508        ASSERT(num_scan_bblks <= INT_MAX);
1509
1510        if (last_blk < num_scan_bblks)
1511                num_scan_bblks = last_blk;
1512        start_blk = last_blk - num_scan_bblks;
1513
1514        /*
1515         * We search for any instances of cycle number 0 that occur before
1516         * our current estimate of the head.  What we're trying to detect is
1517         *        1 ... | 0 | 1 | 0...
1518         *                       ^ binary search ends here
1519         */
1520        if ((error = xlog_find_verify_cycle(log, start_blk,
1521                                         (int)num_scan_bblks, 0, &new_blk)))
1522                goto out_free_buffer;
1523        if (new_blk != -1)
1524                last_blk = new_blk;
1525
1526        /*
1527         * Potentially backup over partial log record write.  We don't need
1528         * to search the end of the log because we know it is zero.
1529         */
1530        error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1531        if (error == 1)
1532                error = -EIO;
1533        if (error)
1534                goto out_free_buffer;
1535
1536        *blk_no = last_blk;
1537out_free_buffer:
1538        kmem_free(buffer);
1539        if (error)
1540                return error;
1541        return 1;
1542}
1543
1544/*
1545 * These are simple subroutines used by xlog_clear_stale_blocks() below
1546 * to initialize a buffer full of empty log record headers and write
1547 * them into the log.
1548 */
1549STATIC void
1550xlog_add_record(
1551        struct xlog             *log,
1552        char                    *buf,
1553        int                     cycle,
1554        int                     block,
1555        int                     tail_cycle,
1556        int                     tail_block)
1557{
1558        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;
1559
1560        memset(buf, 0, BBSIZE);
1561        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1562        recp->h_cycle = cpu_to_be32(cycle);
1563        recp->h_version = cpu_to_be32(
1564                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1565        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1566        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1567        recp->h_fmt = cpu_to_be32(XLOG_FMT);
1568        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1569}
1570
1571STATIC int
1572xlog_write_log_records(
1573        struct xlog     *log,
1574        int             cycle,
1575        int             start_block,
1576        int             blocks,
1577        int             tail_cycle,
1578        int             tail_block)
1579{
1580        char            *offset;
1581        char            *buffer;
1582        int             balign, ealign;
1583        int             sectbb = log->l_sectBBsize;
1584        int             end_block = start_block + blocks;
1585        int             bufblks;
1586        int             error = 0;
1587        int             i, j = 0;
1588
1589        /*
1590         * Greedily allocate a buffer big enough to handle the full
1591         * range of basic blocks to be written.  If that fails, try
1592         * a smaller size.  We need to be able to write at least a
1593         * log sector, or we're out of luck.
1594         */
1595        bufblks = 1 << ffs(blocks);
1596        while (bufblks > log->l_logBBsize)
1597                bufblks >>= 1;
1598        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1599                bufblks >>= 1;
1600                if (bufblks < sectbb)
1601                        return -ENOMEM;
1602        }
1603
1604        /* We may need to do a read at the start to fill in part of
1605         * the buffer in the starting sector not covered by the first
1606         * write below.
1607         */
1608        balign = round_down(start_block, sectbb);
1609        if (balign != start_block) {
1610                error = xlog_bread_noalign(log, start_block, 1, buffer);
1611                if (error)
1612                        goto out_free_buffer;
1613
1614                j = start_block - balign;
1615        }
1616
1617        for (i = start_block; i < end_block; i += bufblks) {
1618                int             bcount, endcount;
1619
1620                bcount = min(bufblks, end_block - start_block);
1621                endcount = bcount - j;
1622
1623                /* We may need to do a read at the end to fill in part of
1624                 * the buffer in the final sector not covered by the write.
1625                 * If this is the same sector as the above read, skip it.
1626                 */
1627                ealign = round_down(end_block, sectbb);
1628                if (j == 0 && (start_block + endcount > ealign)) {
1629                        error = xlog_bread_noalign(log, ealign, sectbb,
1630                                        buffer + BBTOB(ealign - start_block));
1631                        if (error)
1632                                break;
1633
1634                }
1635
1636                offset = buffer + xlog_align(log, start_block);
1637                for (; j < endcount; j++) {
1638                        xlog_add_record(log, offset, cycle, i+j,
1639                                        tail_cycle, tail_block);
1640                        offset += BBSIZE;
1641                }
1642                error = xlog_bwrite(log, start_block, endcount, buffer);
1643                if (error)
1644                        break;
1645                start_block += endcount;
1646                j = 0;
1647        }
1648
1649out_free_buffer:
1650        kmem_free(buffer);
1651        return error;
1652}
1653
1654/*
1655 * This routine is called to blow away any incomplete log writes out
1656 * in front of the log head.  We do this so that we won't become confused
1657 * if we come up, write only a little bit more, and then crash again.
1658 * If we leave the partial log records out there, this situation could
1659 * cause us to think those partial writes are valid blocks since they
1660 * have the current cycle number.  We get rid of them by overwriting them
1661 * with empty log records with the old cycle number rather than the
1662 * current one.
1663 *
1664 * The tail lsn is passed in rather than taken from
1665 * the log so that we will not write over the unmount record after a
1666 * clean unmount in a 512 block log.  Doing so would leave the log without
1667 * any valid log records in it until a new one was written.  If we crashed
1668 * during that time we would not be able to recover.
1669 */
1670STATIC int
1671xlog_clear_stale_blocks(
1672        struct xlog     *log,
1673        xfs_lsn_t       tail_lsn)
1674{
1675        int             tail_cycle, head_cycle;
1676        int             tail_block, head_block;
1677        int             tail_distance, max_distance;
1678        int             distance;
1679        int             error;
1680
1681        tail_cycle = CYCLE_LSN(tail_lsn);
1682        tail_block = BLOCK_LSN(tail_lsn);
1683        head_cycle = log->l_curr_cycle;
1684        head_block = log->l_curr_block;
1685
1686        /*
1687         * Figure out the distance between the new head of the log
1688         * and the tail.  We want to write over any blocks beyond the
1689         * head that we may have written just before the crash, but
1690         * we don't want to overwrite the tail of the log.
1691         */
1692        if (head_cycle == tail_cycle) {
1693                /*
1694                 * The tail is behind the head in the physical log,
1695                 * so the distance from the head to the tail is the
1696                 * distance from the head to the end of the log plus
1697                 * the distance from the beginning of the log to the
1698                 * tail.
1699                 */
1700                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1701                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1702                                         XFS_ERRLEVEL_LOW, log->l_mp);
1703                        return -EFSCORRUPTED;
1704                }
1705                tail_distance = tail_block + (log->l_logBBsize - head_block);
1706        } else {
1707                /*
1708                 * The head is behind the tail in the physical log,
1709                 * so the distance from the head to the tail is just
1710                 * the tail block minus the head block.
1711                 */
1712                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1713                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1714                                         XFS_ERRLEVEL_LOW, log->l_mp);
1715                        return -EFSCORRUPTED;
1716                }
1717                tail_distance = tail_block - head_block;
1718        }
1719
1720        /*
1721         * If the head is right up against the tail, we can't clear
1722         * anything.
1723         */
1724        if (tail_distance <= 0) {
1725                ASSERT(tail_distance == 0);
1726                return 0;
1727        }
1728
1729        max_distance = XLOG_TOTAL_REC_SHIFT(log);
1730        /*
1731         * Take the smaller of the maximum amount of outstanding I/O
1732         * we could have and the distance to the tail to clear out.
1733         * We take the smaller so that we don't overwrite the tail and
1734         * we don't waste all day writing from the head to the tail
1735         * for no reason.
1736         */
1737        max_distance = min(max_distance, tail_distance);
1738
1739        if ((head_block + max_distance) <= log->l_logBBsize) {
1740                /*
1741                 * We can stomp all the blocks we need to without
1742                 * wrapping around the end of the log.  Just do it
1743                 * in a single write.  Use the cycle number of the
1744                 * current cycle minus one so that the log will look like:
1745                 *     n ... | n - 1 ...
1746                 */
1747                error = xlog_write_log_records(log, (head_cycle - 1),
1748                                head_block, max_distance, tail_cycle,
1749                                tail_block);
1750                if (error)
1751                        return error;
1752        } else {
1753                /*
1754                 * We need to wrap around the end of the physical log in
1755                 * order to clear all the blocks.  Do it in two separate
1756                 * I/Os.  The first write should be from the head to the
1757                 * end of the physical log, and it should use the current
1758                 * cycle number minus one just like above.
1759                 */
1760                distance = log->l_logBBsize - head_block;
1761                error = xlog_write_log_records(log, (head_cycle - 1),
1762                                head_block, distance, tail_cycle,
1763                                tail_block);
1764
1765                if (error)
1766                        return error;
1767
1768                /*
1769                 * Now write the blocks at the start of the physical log.
1770                 * This writes the remainder of the blocks we want to clear.
1771                 * It uses the current cycle number since we're now on the
1772                 * same cycle as the head so that we get:
1773                 *    n ... n ... | n - 1 ...
1774                 *    ^^^^^ blocks we're writing
1775                 */
1776                distance = max_distance - (log->l_logBBsize - head_block);
1777                error = xlog_write_log_records(log, head_cycle, 0, distance,
1778                                tail_cycle, tail_block);
1779                if (error)
1780                        return error;
1781        }
1782
1783        return 0;
1784}
1785
1786/******************************************************************************
1787 *
1788 *              Log recover routines
1789 *
1790 ******************************************************************************
1791 */
1792
1793/*
1794 * Sort the log items in the transaction.
1795 *
1796 * The ordering constraints are defined by the inode allocation and unlink
1797 * behaviour. The rules are:
1798 *
1799 *      1. Every item is only logged once in a given transaction. Hence it
1800 *         represents the last logged state of the item. Hence ordering is
1801 *         dependent on the order in which operations need to be performed so
1802 *         required initial conditions are always met.
1803 *
1804 *      2. Cancelled buffers are recorded in pass 1 in a separate table and
1805 *         there's nothing to replay from them so we can simply cull them
1806 *         from the transaction. However, we can't do that until after we've
1807 *         replayed all the other items because they may be dependent on the
1808 *         cancelled buffer and replaying the cancelled buffer can remove it
1809 *         form the cancelled buffer table. Hence they have tobe done last.
1810 *
1811 *      3. Inode allocation buffers must be replayed before inode items that
1812 *         read the buffer and replay changes into it. For filesystems using the
1813 *         ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1814 *         treated the same as inode allocation buffers as they create and
1815 *         initialise the buffers directly.
1816 *
1817 *      4. Inode unlink buffers must be replayed after inode items are replayed.
1818 *         This ensures that inodes are completely flushed to the inode buffer
1819 *         in a "free" state before we remove the unlinked inode list pointer.
1820 *
1821 * Hence the ordering needs to be inode allocation buffers first, inode items
1822 * second, inode unlink buffers third and cancelled buffers last.
1823 *
1824 * But there's a problem with that - we can't tell an inode allocation buffer
1825 * apart from a regular buffer, so we can't separate them. We can, however,
1826 * tell an inode unlink buffer from the others, and so we can separate them out
1827 * from all the other buffers and move them to last.
1828 *
1829 * Hence, 4 lists, in order from head to tail:
1830 *      - buffer_list for all buffers except cancelled/inode unlink buffers
1831 *      - item_list for all non-buffer items
1832 *      - inode_buffer_list for inode unlink buffers
1833 *      - cancel_list for the cancelled buffers
1834 *
1835 * Note that we add objects to the tail of the lists so that first-to-last
1836 * ordering is preserved within the lists. Adding objects to the head of the
1837 * list means when we traverse from the head we walk them in last-to-first
1838 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1839 * but for all other items there may be specific ordering that we need to
1840 * preserve.
1841 */
1842STATIC int
1843xlog_recover_reorder_trans(
1844        struct xlog             *log,
1845        struct xlog_recover     *trans,
1846        int                     pass)
1847{
1848        xlog_recover_item_t     *item, *n;
1849        int                     error = 0;
1850        LIST_HEAD(sort_list);
1851        LIST_HEAD(cancel_list);
1852        LIST_HEAD(buffer_list);
1853        LIST_HEAD(inode_buffer_list);
1854        LIST_HEAD(inode_list);
1855
1856        list_splice_init(&trans->r_itemq, &sort_list);
1857        list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1858                xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1859
1860                switch (ITEM_TYPE(item)) {
1861                case XFS_LI_ICREATE:
1862                        list_move_tail(&item->ri_list, &buffer_list);
1863                        break;
1864                case XFS_LI_BUF:
1865                        if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1866                                trace_xfs_log_recover_item_reorder_head(log,
1867                                                        trans, item, pass);
1868                                list_move(&item->ri_list, &cancel_list);
1869                                break;
1870                        }
1871                        if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1872                                list_move(&item->ri_list, &inode_buffer_list);
1873                                break;
1874                        }
1875                        list_move_tail(&item->ri_list, &buffer_list);
1876                        break;
1877                case XFS_LI_INODE:
1878                case XFS_LI_DQUOT:
1879                case XFS_LI_QUOTAOFF:
1880                case XFS_LI_EFD:
1881                case XFS_LI_EFI:
1882                case XFS_LI_RUI:
1883                case XFS_LI_RUD:
1884                case XFS_LI_CUI:
1885                case XFS_LI_CUD:
1886                case XFS_LI_BUI:
1887                case XFS_LI_BUD:
1888                        trace_xfs_log_recover_item_reorder_tail(log,
1889                                                        trans, item, pass);
1890                        list_move_tail(&item->ri_list, &inode_list);
1891                        break;
1892                default:
1893                        xfs_warn(log->l_mp,
1894                                "%s: unrecognized type of log operation",
1895                                __func__);
1896                        ASSERT(0);
1897                        /*
1898                         * return the remaining items back to the transaction
1899                         * item list so they can be freed in caller.
1900                         */
1901                        if (!list_empty(&sort_list))
1902                                list_splice_init(&sort_list, &trans->r_itemq);
1903                        error = -EIO;
1904                        goto out;
1905                }
1906        }
1907out:
1908        ASSERT(list_empty(&sort_list));
1909        if (!list_empty(&buffer_list))
1910                list_splice(&buffer_list, &trans->r_itemq);
1911        if (!list_empty(&inode_list))
1912                list_splice_tail(&inode_list, &trans->r_itemq);
1913        if (!list_empty(&inode_buffer_list))
1914                list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1915        if (!list_empty(&cancel_list))
1916                list_splice_tail(&cancel_list, &trans->r_itemq);
1917        return error;
1918}
1919
1920/*
1921 * Build up the table of buf cancel records so that we don't replay
1922 * cancelled data in the second pass.  For buffer records that are
1923 * not cancel records, there is nothing to do here so we just return.
1924 *
1925 * If we get a cancel record which is already in the table, this indicates
1926 * that the buffer was cancelled multiple times.  In order to ensure
1927 * that during pass 2 we keep the record in the table until we reach its
1928 * last occurrence in the log, we keep a reference count in the cancel
1929 * record in the table to tell us how many times we expect to see this
1930 * record during the second pass.
1931 */
1932STATIC int
1933xlog_recover_buffer_pass1(
1934        struct xlog                     *log,
1935        struct xlog_recover_item        *item)
1936{
1937        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1938        struct list_head        *bucket;
1939        struct xfs_buf_cancel   *bcp;
1940
1941        /*
1942         * If this isn't a cancel buffer item, then just return.
1943         */
1944        if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1945                trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1946                return 0;
1947        }
1948
1949        /*
1950         * Insert an xfs_buf_cancel record into the hash table of them.
1951         * If there is already an identical record, bump its reference count.
1952         */
1953        bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1954        list_for_each_entry(bcp, bucket, bc_list) {
1955                if (bcp->bc_blkno == buf_f->blf_blkno &&
1956                    bcp->bc_len == buf_f->blf_len) {
1957                        bcp->bc_refcount++;
1958                        trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1959                        return 0;
1960                }
1961        }
1962
1963        bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1964        bcp->bc_blkno = buf_f->blf_blkno;
1965        bcp->bc_len = buf_f->blf_len;
1966        bcp->bc_refcount = 1;
1967        list_add_tail(&bcp->bc_list, bucket);
1968
1969        trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1970        return 0;
1971}
1972
1973/*
1974 * Check to see whether the buffer being recovered has a corresponding
1975 * entry in the buffer cancel record table. If it is, return the cancel
1976 * buffer structure to the caller.
1977 */
1978STATIC struct xfs_buf_cancel *
1979xlog_peek_buffer_cancelled(
1980        struct xlog             *log,
1981        xfs_daddr_t             blkno,
1982        uint                    len,
1983        unsigned short                  flags)
1984{
1985        struct list_head        *bucket;
1986        struct xfs_buf_cancel   *bcp;
1987
1988        if (!log->l_buf_cancel_table) {
1989                /* empty table means no cancelled buffers in the log */
1990                ASSERT(!(flags & XFS_BLF_CANCEL));
1991                return NULL;
1992        }
1993
1994        bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1995        list_for_each_entry(bcp, bucket, bc_list) {
1996                if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1997                        return bcp;
1998        }
1999
2000        /*
2001         * We didn't find a corresponding entry in the table, so return 0 so
2002         * that the buffer is NOT cancelled.
2003         */
2004        ASSERT(!(flags & XFS_BLF_CANCEL));
2005        return NULL;
2006}
2007
2008/*
2009 * If the buffer is being cancelled then return 1 so that it will be cancelled,
2010 * otherwise return 0.  If the buffer is actually a buffer cancel item
2011 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2012 * table and remove it from the table if this is the last reference.
2013 *
2014 * We remove the cancel record from the table when we encounter its last
2015 * occurrence in the log so that if the same buffer is re-used again after its
2016 * last cancellation we actually replay the changes made at that point.
2017 */
2018STATIC int
2019xlog_check_buffer_cancelled(
2020        struct xlog             *log,
2021        xfs_daddr_t             blkno,
2022        uint                    len,
2023        unsigned short                  flags)
2024{
2025        struct xfs_buf_cancel   *bcp;
2026
2027        bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2028        if (!bcp)
2029                return 0;
2030
2031        /*
2032         * We've go a match, so return 1 so that the recovery of this buffer
2033         * is cancelled.  If this buffer is actually a buffer cancel log
2034         * item, then decrement the refcount on the one in the table and
2035         * remove it if this is the last reference.
2036         */
2037        if (flags & XFS_BLF_CANCEL) {
2038                if (--bcp->bc_refcount == 0) {
2039                        list_del(&bcp->bc_list);
2040                        kmem_free(bcp);
2041                }
2042        }
2043        return 1;
2044}
2045
2046/*
2047 * Perform recovery for a buffer full of inodes.  In these buffers, the only
2048 * data which should be recovered is that which corresponds to the
2049 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2050 * data for the inodes is always logged through the inodes themselves rather
2051 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2052 *
2053 * The only time when buffers full of inodes are fully recovered is when the
2054 * buffer is full of newly allocated inodes.  In this case the buffer will
2055 * not be marked as an inode buffer and so will be sent to
2056 * xlog_recover_do_reg_buffer() below during recovery.
2057 */
2058STATIC int
2059xlog_recover_do_inode_buffer(
2060        struct xfs_mount        *mp,
2061        xlog_recover_item_t     *item,
2062        struct xfs_buf          *bp,
2063        xfs_buf_log_format_t    *buf_f)
2064{
2065        int                     i;
2066        int                     item_index = 0;
2067        int                     bit = 0;
2068        int                     nbits = 0;
2069        int                     reg_buf_offset = 0;
2070        int                     reg_buf_bytes = 0;
2071        int                     next_unlinked_offset;
2072        int                     inodes_per_buf;
2073        xfs_agino_t             *logged_nextp;
2074        xfs_agino_t             *buffer_nextp;
2075
2076        trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2077
2078        /*
2079         * Post recovery validation only works properly on CRC enabled
2080         * filesystems.
2081         */
2082        if (xfs_sb_version_hascrc(&mp->m_sb))
2083                bp->b_ops = &xfs_inode_buf_ops;
2084
2085        inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
2086        for (i = 0; i < inodes_per_buf; i++) {
2087                next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2088                        offsetof(xfs_dinode_t, di_next_unlinked);
2089
2090                while (next_unlinked_offset >=
2091                       (reg_buf_offset + reg_buf_bytes)) {
2092                        /*
2093                         * The next di_next_unlinked field is beyond
2094                         * the current logged region.  Find the next
2095                         * logged region that contains or is beyond
2096                         * the current di_next_unlinked field.
2097                         */
2098                        bit += nbits;
2099                        bit = xfs_next_bit(buf_f->blf_data_map,
2100                                           buf_f->blf_map_size, bit);
2101
2102                        /*
2103                         * If there are no more logged regions in the
2104                         * buffer, then we're done.
2105                         */
2106                        if (bit == -1)
2107                                return 0;
2108
2109                        nbits = xfs_contig_bits(buf_f->blf_data_map,
2110                                                buf_f->blf_map_size, bit);
2111                        ASSERT(nbits > 0);
2112                        reg_buf_offset = bit << XFS_BLF_SHIFT;
2113                        reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2114                        item_index++;
2115                }
2116
2117                /*
2118                 * If the current logged region starts after the current
2119                 * di_next_unlinked field, then move on to the next
2120                 * di_next_unlinked field.
2121                 */
2122                if (next_unlinked_offset < reg_buf_offset)
2123                        continue;
2124
2125                ASSERT(item->ri_buf[item_index].i_addr != NULL);
2126                ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2127                ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
2128
2129                /*
2130                 * The current logged region contains a copy of the
2131                 * current di_next_unlinked field.  Extract its value
2132                 * and copy it to the buffer copy.
2133                 */
2134                logged_nextp = item->ri_buf[item_index].i_addr +
2135                                next_unlinked_offset - reg_buf_offset;
2136                if (unlikely(*logged_nextp == 0)) {
2137                        xfs_alert(mp,
2138                "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2139                "Trying to replay bad (0) inode di_next_unlinked field.",
2140                                item, bp);
2141                        XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2142                                         XFS_ERRLEVEL_LOW, mp);
2143                        return -EFSCORRUPTED;
2144                }
2145
2146                buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2147                *buffer_nextp = *logged_nextp;
2148
2149                /*
2150                 * If necessary, recalculate the CRC in the on-disk inode. We
2151                 * have to leave the inode in a consistent state for whoever
2152                 * reads it next....
2153                 */
2154                xfs_dinode_calc_crc(mp,
2155                                xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2156
2157        }
2158
2159        return 0;
2160}
2161
2162/*
2163 * V5 filesystems know the age of the buffer on disk being recovered. We can
2164 * have newer objects on disk than we are replaying, and so for these cases we
2165 * don't want to replay the current change as that will make the buffer contents
2166 * temporarily invalid on disk.
2167 *
2168 * The magic number might not match the buffer type we are going to recover
2169 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2170 * extract the LSN of the existing object in the buffer based on it's current
2171 * magic number.  If we don't recognise the magic number in the buffer, then
2172 * return a LSN of -1 so that the caller knows it was an unrecognised block and
2173 * so can recover the buffer.
2174 *
2175 * Note: we cannot rely solely on magic number matches to determine that the
2176 * buffer has a valid LSN - we also need to verify that it belongs to this
2177 * filesystem, so we need to extract the object's LSN and compare it to that
2178 * which we read from the superblock. If the UUIDs don't match, then we've got a
2179 * stale metadata block from an old filesystem instance that we need to recover
2180 * over the top of.
2181 */
2182static xfs_lsn_t
2183xlog_recover_get_buf_lsn(
2184        struct xfs_mount        *mp,
2185        struct xfs_buf          *bp)
2186{
2187        uint32_t                magic32;
2188        uint16_t                magic16;
2189        uint16_t                magicda;
2190        void                    *blk = bp->b_addr;
2191        uuid_t                  *uuid;
2192        xfs_lsn_t               lsn = -1;
2193
2194        /* v4 filesystems always recover immediately */
2195        if (!xfs_sb_version_hascrc(&mp->m_sb))
2196                goto recover_immediately;
2197
2198        magic32 = be32_to_cpu(*(__be32 *)blk);
2199        switch (magic32) {
2200        case XFS_ABTB_CRC_MAGIC:
2201        case XFS_ABTC_CRC_MAGIC:
2202        case XFS_ABTB_MAGIC:
2203        case XFS_ABTC_MAGIC:
2204        case XFS_RMAP_CRC_MAGIC:
2205        case XFS_REFC_CRC_MAGIC:
2206        case XFS_IBT_CRC_MAGIC:
2207        case XFS_IBT_MAGIC: {
2208                struct xfs_btree_block *btb = blk;
2209
2210                lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2211                uuid = &btb->bb_u.s.bb_uuid;
2212                break;
2213        }
2214        case XFS_BMAP_CRC_MAGIC:
2215        case XFS_BMAP_MAGIC: {
2216                struct xfs_btree_block *btb = blk;
2217
2218                lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2219                uuid = &btb->bb_u.l.bb_uuid;
2220                break;
2221        }
2222        case XFS_AGF_MAGIC:
2223                lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2224                uuid = &((struct xfs_agf *)blk)->agf_uuid;
2225                break;
2226        case XFS_AGFL_MAGIC:
2227                lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2228                uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2229                break;
2230        case XFS_AGI_MAGIC:
2231                lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2232                uuid = &((struct xfs_agi *)blk)->agi_uuid;
2233                break;
2234        case XFS_SYMLINK_MAGIC:
2235                lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2236                uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2237                break;
2238        case XFS_DIR3_BLOCK_MAGIC:
2239        case XFS_DIR3_DATA_MAGIC:
2240        case XFS_DIR3_FREE_MAGIC:
2241                lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2242                uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2243                break;
2244        case XFS_ATTR3_RMT_MAGIC:
2245                /*
2246                 * Remote attr blocks are written synchronously, rather than
2247                 * being logged. That means they do not contain a valid LSN
2248                 * (i.e. transactionally ordered) in them, and hence any time we
2249                 * see a buffer to replay over the top of a remote attribute
2250                 * block we should simply do so.
2251                 */
2252                goto recover_immediately;
2253        case XFS_SB_MAGIC:
2254                /*
2255                 * superblock uuids are magic. We may or may not have a
2256                 * sb_meta_uuid on disk, but it will be set in the in-core
2257                 * superblock. We set the uuid pointer for verification
2258                 * according to the superblock feature mask to ensure we check
2259                 * the relevant UUID in the superblock.
2260                 */
2261                lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2262                if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2263                        uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2264                else
2265                        uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2266                break;
2267        default:
2268                break;
2269        }
2270
2271        if (lsn != (xfs_lsn_t)-1) {
2272                if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2273                        goto recover_immediately;
2274                return lsn;
2275        }
2276
2277        magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2278        switch (magicda) {
2279        case XFS_DIR3_LEAF1_MAGIC:
2280        case XFS_DIR3_LEAFN_MAGIC:
2281        case XFS_DA3_NODE_MAGIC:
2282                lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2283                uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2284                break;
2285        default:
2286                break;
2287        }
2288
2289        if (lsn != (xfs_lsn_t)-1) {
2290                if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2291                        goto recover_immediately;
2292                return lsn;
2293        }
2294
2295        /*
2296         * We do individual object checks on dquot and inode buffers as they
2297         * have their own individual LSN records. Also, we could have a stale
2298         * buffer here, so we have to at least recognise these buffer types.
2299         *
2300         * A notd complexity here is inode unlinked list processing - it logs
2301         * the inode directly in the buffer, but we don't know which inodes have
2302         * been modified, and there is no global buffer LSN. Hence we need to
2303         * recover all inode buffer types immediately. This problem will be
2304         * fixed by logical logging of the unlinked list modifications.
2305         */
2306        magic16 = be16_to_cpu(*(__be16 *)blk);
2307        switch (magic16) {
2308        case XFS_DQUOT_MAGIC:
2309        case XFS_DINODE_MAGIC:
2310                goto recover_immediately;
2311        default:
2312                break;
2313        }
2314
2315        /* unknown buffer contents, recover immediately */
2316
2317recover_immediately:
2318        return (xfs_lsn_t)-1;
2319
2320}
2321
2322/*
2323 * Validate the recovered buffer is of the correct type and attach the
2324 * appropriate buffer operations to them for writeback. Magic numbers are in a
2325 * few places:
2326 *      the first 16 bits of the buffer (inode buffer, dquot buffer),
2327 *      the first 32 bits of the buffer (most blocks),
2328 *      inside a struct xfs_da_blkinfo at the start of the buffer.
2329 */
2330static void
2331xlog_recover_validate_buf_type(
2332        struct xfs_mount        *mp,
2333        struct xfs_buf          *bp,
2334        xfs_buf_log_format_t    *buf_f,
2335        xfs_lsn_t               current_lsn)
2336{
2337        struct xfs_da_blkinfo   *info = bp->b_addr;
2338        uint32_t                magic32;
2339        uint16_t                magic16;
2340        uint16_t                magicda;
2341        char                    *warnmsg = NULL;
2342
2343        /*
2344         * We can only do post recovery validation on items on CRC enabled
2345         * fielsystems as we need to know when the buffer was written to be able
2346         * to determine if we should have replayed the item. If we replay old
2347         * metadata over a newer buffer, then it will enter a temporarily
2348         * inconsistent state resulting in verification failures. Hence for now
2349         * just avoid the verification stage for non-crc filesystems
2350         */
2351        if (!xfs_sb_version_hascrc(&mp->m_sb))
2352                return;
2353
2354        magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2355        magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2356        magicda = be16_to_cpu(info->magic);
2357        switch (xfs_blft_from_flags(buf_f)) {
2358        case XFS_BLFT_BTREE_BUF:
2359                switch (magic32) {
2360                case XFS_ABTB_CRC_MAGIC:
2361                case XFS_ABTB_MAGIC:
2362                        bp->b_ops = &xfs_bnobt_buf_ops;
2363                        break;
2364                case XFS_ABTC_CRC_MAGIC:
2365                case XFS_ABTC_MAGIC:
2366                        bp->b_ops = &xfs_cntbt_buf_ops;
2367                        break;
2368                case XFS_IBT_CRC_MAGIC:
2369                case XFS_IBT_MAGIC:
2370                        bp->b_ops = &xfs_inobt_buf_ops;
2371                        break;
2372                case XFS_FIBT_CRC_MAGIC:
2373                case XFS_FIBT_MAGIC:
2374                        bp->b_ops = &xfs_finobt_buf_ops;
2375                        break;
2376                case XFS_BMAP_CRC_MAGIC:
2377                case XFS_BMAP_MAGIC:
2378                        bp->b_ops = &xfs_bmbt_buf_ops;
2379                        break;
2380                case XFS_RMAP_CRC_MAGIC:
2381                        bp->b_ops = &xfs_rmapbt_buf_ops;
2382                        break;
2383                case XFS_REFC_CRC_MAGIC:
2384                        bp->b_ops = &xfs_refcountbt_buf_ops;
2385                        break;
2386                default:
2387                        warnmsg = "Bad btree block magic!";
2388                        break;
2389                }
2390                break;
2391        case XFS_BLFT_AGF_BUF:
2392                if (magic32 != XFS_AGF_MAGIC) {
2393                        warnmsg = "Bad AGF block magic!";
2394                        break;
2395                }
2396                bp->b_ops = &xfs_agf_buf_ops;
2397                break;
2398        case XFS_BLFT_AGFL_BUF:
2399                if (magic32 != XFS_AGFL_MAGIC) {
2400                        warnmsg = "Bad AGFL block magic!";
2401                        break;
2402                }
2403                bp->b_ops = &xfs_agfl_buf_ops;
2404                break;
2405        case XFS_BLFT_AGI_BUF:
2406                if (magic32 != XFS_AGI_MAGIC) {
2407                        warnmsg = "Bad AGI block magic!";
2408                        break;
2409                }
2410                bp->b_ops = &xfs_agi_buf_ops;
2411                break;
2412        case XFS_BLFT_UDQUOT_BUF:
2413        case XFS_BLFT_PDQUOT_BUF:
2414        case XFS_BLFT_GDQUOT_BUF:
2415#ifdef CONFIG_XFS_QUOTA
2416                if (magic16 != XFS_DQUOT_MAGIC) {
2417                        warnmsg = "Bad DQUOT block magic!";
2418                        break;
2419                }
2420                bp->b_ops = &xfs_dquot_buf_ops;
2421#else
2422                xfs_alert(mp,
2423        "Trying to recover dquots without QUOTA support built in!");
2424                ASSERT(0);
2425#endif
2426                break;
2427        case XFS_BLFT_DINO_BUF:
2428                if (magic16 != XFS_DINODE_MAGIC) {
2429                        warnmsg = "Bad INODE block magic!";
2430                        break;
2431                }
2432                bp->b_ops = &xfs_inode_buf_ops;
2433                break;
2434        case XFS_BLFT_SYMLINK_BUF:
2435                if (magic32 != XFS_SYMLINK_MAGIC) {
2436                        warnmsg = "Bad symlink block magic!";
2437                        break;
2438                }
2439                bp->b_ops = &xfs_symlink_buf_ops;
2440                break;
2441        case XFS_BLFT_DIR_BLOCK_BUF:
2442                if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2443                    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2444                        warnmsg = "Bad dir block magic!";
2445                        break;
2446                }
2447                bp->b_ops = &xfs_dir3_block_buf_ops;
2448                break;
2449        case XFS_BLFT_DIR_DATA_BUF:
2450                if (magic32 != XFS_DIR2_DATA_MAGIC &&
2451                    magic32 != XFS_DIR3_DATA_MAGIC) {
2452                        warnmsg = "Bad dir data magic!";
2453                        break;
2454                }
2455                bp->b_ops = &xfs_dir3_data_buf_ops;
2456                break;
2457        case XFS_BLFT_DIR_FREE_BUF:
2458                if (magic32 != XFS_DIR2_FREE_MAGIC &&
2459                    magic32 != XFS_DIR3_FREE_MAGIC) {
2460                        warnmsg = "Bad dir3 free magic!";
2461                        break;
2462                }
2463                bp->b_ops = &xfs_dir3_free_buf_ops;
2464                break;
2465        case XFS_BLFT_DIR_LEAF1_BUF:
2466                if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2467                    magicda != XFS_DIR3_LEAF1_MAGIC) {
2468                        warnmsg = "Bad dir leaf1 magic!";
2469                        break;
2470                }
2471                bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2472                break;
2473        case XFS_BLFT_DIR_LEAFN_BUF:
2474                if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2475                    magicda != XFS_DIR3_LEAFN_MAGIC) {
2476                        warnmsg = "Bad dir leafn magic!";
2477                        break;
2478                }
2479                bp->b_ops = &xfs_dir3_leafn_buf_ops;
2480                break;
2481        case XFS_BLFT_DA_NODE_BUF:
2482                if (magicda != XFS_DA_NODE_MAGIC &&
2483                    magicda != XFS_DA3_NODE_MAGIC) {
2484                        warnmsg = "Bad da node magic!";
2485                        break;
2486                }
2487                bp->b_ops = &xfs_da3_node_buf_ops;
2488                break;
2489        case XFS_BLFT_ATTR_LEAF_BUF:
2490                if (magicda != XFS_ATTR_LEAF_MAGIC &&
2491                    magicda != XFS_ATTR3_LEAF_MAGIC) {
2492                        warnmsg = "Bad attr leaf magic!";
2493                        break;
2494                }
2495                bp->b_ops = &xfs_attr3_leaf_buf_ops;
2496                break;
2497        case XFS_BLFT_ATTR_RMT_BUF:
2498                if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2499                        warnmsg = "Bad attr remote magic!";
2500                        break;
2501                }
2502                bp->b_ops = &xfs_attr3_rmt_buf_ops;
2503                break;
2504        case XFS_BLFT_SB_BUF:
2505                if (magic32 != XFS_SB_MAGIC) {
2506                        warnmsg = "Bad SB block magic!";
2507                        break;
2508                }
2509                bp->b_ops = &xfs_sb_buf_ops;
2510                break;
2511#ifdef CONFIG_XFS_RT
2512        case XFS_BLFT_RTBITMAP_BUF:
2513        case XFS_BLFT_RTSUMMARY_BUF:
2514                /* no magic numbers for verification of RT buffers */
2515                bp->b_ops = &xfs_rtbuf_ops;
2516                break;
2517#endif /* CONFIG_XFS_RT */
2518        default:
2519                xfs_warn(mp, "Unknown buffer type %d!",
2520                         xfs_blft_from_flags(buf_f));
2521                break;
2522        }
2523
2524        /*
2525         * Nothing else to do in the case of a NULL current LSN as this means
2526         * the buffer is more recent than the change in the log and will be
2527         * skipped.
2528         */
2529        if (current_lsn == NULLCOMMITLSN)
2530                return;
2531
2532        if (warnmsg) {
2533                xfs_warn(mp, warnmsg);
2534                ASSERT(0);
2535        }
2536
2537        /*
2538         * We must update the metadata LSN of the buffer as it is written out to
2539         * ensure that older transactions never replay over this one and corrupt
2540         * the buffer. This can occur if log recovery is interrupted at some
2541         * point after the current transaction completes, at which point a
2542         * subsequent mount starts recovery from the beginning.
2543         *
2544         * Write verifiers update the metadata LSN from log items attached to
2545         * the buffer. Therefore, initialize a bli purely to carry the LSN to
2546         * the verifier. We'll clean it up in our ->iodone() callback.
2547         */
2548        if (bp->b_ops) {
2549                struct xfs_buf_log_item *bip;
2550
2551                ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2552                bp->b_iodone = xlog_recover_iodone;
2553                xfs_buf_item_init(bp, mp);
2554                bip = bp->b_log_item;
2555                bip->bli_item.li_lsn = current_lsn;
2556        }
2557}
2558
2559/*
2560 * Perform a 'normal' buffer recovery.  Each logged region of the
2561 * buffer should be copied over the corresponding region in the
2562 * given buffer.  The bitmap in the buf log format structure indicates
2563 * where to place the logged data.
2564 */
2565STATIC void
2566xlog_recover_do_reg_buffer(
2567        struct xfs_mount        *mp,
2568        xlog_recover_item_t     *item,
2569        struct xfs_buf          *bp,
2570        xfs_buf_log_format_t    *buf_f,
2571        xfs_lsn_t               current_lsn)
2572{
2573        int                     i;
2574        int                     bit;
2575        int                     nbits;
2576        xfs_failaddr_t          fa;
2577
2578        trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2579
2580        bit = 0;
2581        i = 1;  /* 0 is the buf format structure */
2582        while (1) {
2583                bit = xfs_next_bit(buf_f->blf_data_map,
2584                                   buf_f->blf_map_size, bit);
2585                if (bit == -1)
2586                        break;
2587                nbits = xfs_contig_bits(buf_f->blf_data_map,
2588                                        buf_f->blf_map_size, bit);
2589                ASSERT(nbits > 0);
2590                ASSERT(item->ri_buf[i].i_addr != NULL);
2591                ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2592                ASSERT(BBTOB(bp->b_length) >=
2593                       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2594
2595                /*
2596                 * The dirty regions logged in the buffer, even though
2597                 * contiguous, may span multiple chunks. This is because the
2598                 * dirty region may span a physical page boundary in a buffer
2599                 * and hence be split into two separate vectors for writing into
2600                 * the log. Hence we need to trim nbits back to the length of
2601                 * the current region being copied out of the log.
2602                 */
2603                if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2604                        nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2605
2606                /*
2607                 * Do a sanity check if this is a dquot buffer. Just checking
2608                 * the first dquot in the buffer should do. XXXThis is
2609                 * probably a good thing to do for other buf types also.
2610                 */
2611                fa = NULL;
2612                if (buf_f->blf_flags &
2613                   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2614                        if (item->ri_buf[i].i_addr == NULL) {
2615                                xfs_alert(mp,
2616                                        "XFS: NULL dquot in %s.", __func__);
2617                                goto next;
2618                        }
2619                        if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2620                                xfs_alert(mp,
2621                                        "XFS: dquot too small (%d) in %s.",
2622                                        item->ri_buf[i].i_len, __func__);
2623                                goto next;
2624                        }
2625                        fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2626                                               -1, 0);
2627                        if (fa) {
2628                                xfs_alert(mp,
2629        "dquot corrupt at %pS trying to replay into block 0x%llx",
2630                                        fa, bp->b_bn);
2631                                goto next;
2632                        }
2633                }
2634
2635                memcpy(xfs_buf_offset(bp,
2636                        (uint)bit << XFS_BLF_SHIFT),    /* dest */
2637                        item->ri_buf[i].i_addr,         /* source */
2638                        nbits<<XFS_BLF_SHIFT);          /* length */
2639 next:
2640                i++;
2641                bit += nbits;
2642        }
2643
2644        /* Shouldn't be any more regions */
2645        ASSERT(i == item->ri_total);
2646
2647        xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2648}
2649
2650/*
2651 * Perform a dquot buffer recovery.
2652 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2653 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2654 * Else, treat it as a regular buffer and do recovery.
2655 *
2656 * Return false if the buffer was tossed and true if we recovered the buffer to
2657 * indicate to the caller if the buffer needs writing.
2658 */
2659STATIC bool
2660xlog_recover_do_dquot_buffer(
2661        struct xfs_mount                *mp,
2662        struct xlog                     *log,
2663        struct xlog_recover_item        *item,
2664        struct xfs_buf                  *bp,
2665        struct xfs_buf_log_format       *buf_f)
2666{
2667        uint                    type;
2668
2669        trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2670
2671        /*
2672         * Filesystems are required to send in quota flags at mount time.
2673         */
2674        if (!mp->m_qflags)
2675                return false;
2676
2677        type = 0;
2678        if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2679                type |= XFS_DQ_USER;
2680        if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2681                type |= XFS_DQ_PROJ;
2682        if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2683                type |= XFS_DQ_GROUP;
2684        /*
2685         * This type of quotas was turned off, so ignore this buffer
2686         */
2687        if (log->l_quotaoffs_flag & type)
2688                return false;
2689
2690        xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2691        return true;
2692}
2693
2694/*
2695 * This routine replays a modification made to a buffer at runtime.
2696 * There are actually two types of buffer, regular and inode, which
2697 * are handled differently.  Inode buffers are handled differently
2698 * in that we only recover a specific set of data from them, namely
2699 * the inode di_next_unlinked fields.  This is because all other inode
2700 * data is actually logged via inode records and any data we replay
2701 * here which overlaps that may be stale.
2702 *
2703 * When meta-data buffers are freed at run time we log a buffer item
2704 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2705 * of the buffer in the log should not be replayed at recovery time.
2706 * This is so that if the blocks covered by the buffer are reused for
2707 * file data before we crash we don't end up replaying old, freed
2708 * meta-data into a user's file.
2709 *
2710 * To handle the cancellation of buffer log items, we make two passes
2711 * over the log during recovery.  During the first we build a table of
2712 * those buffers which have been cancelled, and during the second we
2713 * only replay those buffers which do not have corresponding cancel
2714 * records in the table.  See xlog_recover_buffer_pass[1,2] above
2715 * for more details on the implementation of the table of cancel records.
2716 */
2717STATIC int
2718xlog_recover_buffer_pass2(
2719        struct xlog                     *log,
2720        struct list_head                *buffer_list,
2721        struct xlog_recover_item        *item,
2722        xfs_lsn_t                       current_lsn)
2723{
2724        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
2725        xfs_mount_t             *mp = log->l_mp;
2726        xfs_buf_t               *bp;
2727        int                     error;
2728        uint                    buf_flags;
2729        xfs_lsn_t               lsn;
2730
2731        /*
2732         * In this pass we only want to recover all the buffers which have
2733         * not been cancelled and are not cancellation buffers themselves.
2734         */
2735        if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2736                        buf_f->blf_len, buf_f->blf_flags)) {
2737                trace_xfs_log_recover_buf_cancel(log, buf_f);
2738                return 0;
2739        }
2740
2741        trace_xfs_log_recover_buf_recover(log, buf_f);
2742
2743        buf_flags = 0;
2744        if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2745                buf_flags |= XBF_UNMAPPED;
2746
2747        bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2748                          buf_flags, NULL);
2749        if (!bp)
2750                return -ENOMEM;
2751        error = bp->b_error;
2752        if (error) {
2753                xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2754                goto out_release;
2755        }
2756
2757        /*
2758         * Recover the buffer only if we get an LSN from it and it's less than
2759         * the lsn of the transaction we are replaying.
2760         *
2761         * Note that we have to be extremely careful of readahead here.
2762         * Readahead does not attach verfiers to the buffers so if we don't
2763         * actually do any replay after readahead because of the LSN we found
2764         * in the buffer if more recent than that current transaction then we
2765         * need to attach the verifier directly. Failure to do so can lead to
2766         * future recovery actions (e.g. EFI and unlinked list recovery) can
2767         * operate on the buffers and they won't get the verifier attached. This
2768         * can lead to blocks on disk having the correct content but a stale
2769         * CRC.
2770         *
2771         * It is safe to assume these clean buffers are currently up to date.
2772         * If the buffer is dirtied by a later transaction being replayed, then
2773         * the verifier will be reset to match whatever recover turns that
2774         * buffer into.
2775         */
2776        lsn = xlog_recover_get_buf_lsn(mp, bp);
2777        if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2778                trace_xfs_log_recover_buf_skip(log, buf_f);
2779                xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2780                goto out_release;
2781        }
2782
2783        if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2784                error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2785                if (error)
2786                        goto out_release;
2787        } else if (buf_f->blf_flags &
2788                  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2789                bool    dirty;
2790
2791                dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2792                if (!dirty)
2793                        goto out_release;
2794        } else {
2795                xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2796        }
2797
2798        /*
2799         * Perform delayed write on the buffer.  Asynchronous writes will be
2800         * slower when taking into account all the buffers to be flushed.
2801         *
2802         * Also make sure that only inode buffers with good sizes stay in
2803         * the buffer cache.  The kernel moves inodes in buffers of 1 block
2804         * or inode_cluster_size bytes, whichever is bigger.  The inode
2805         * buffers in the log can be a different size if the log was generated
2806         * by an older kernel using unclustered inode buffers or a newer kernel
2807         * running with a different inode cluster size.  Regardless, if the
2808         * the inode buffer size isn't max(blocksize, inode_cluster_size)
2809         * for *our* value of inode_cluster_size, then we need to keep
2810         * the buffer out of the buffer cache so that the buffer won't
2811         * overlap with future reads of those inodes.
2812         */
2813        if (XFS_DINODE_MAGIC ==
2814            be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2815            (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
2816                xfs_buf_stale(bp);
2817                error = xfs_bwrite(bp);
2818        } else {
2819                ASSERT(bp->b_mount == mp);
2820                bp->b_iodone = xlog_recover_iodone;
2821                xfs_buf_delwri_queue(bp, buffer_list);
2822        }
2823
2824out_release:
2825        xfs_buf_relse(bp);
2826        return error;
2827}
2828
2829/*
2830 * Inode fork owner changes
2831 *
2832 * If we have been told that we have to reparent the inode fork, it's because an
2833 * extent swap operation on a CRC enabled filesystem has been done and we are
2834 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2835 * owners of it.
2836 *
2837 * The complexity here is that we don't have an inode context to work with, so
2838 * after we've replayed the inode we need to instantiate one.  This is where the
2839 * fun begins.
2840 *
2841 * We are in the middle of log recovery, so we can't run transactions. That
2842 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2843 * that will result in the corresponding iput() running the inode through
2844 * xfs_inactive(). If we've just replayed an inode core that changes the link
2845 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2846 * transactions (bad!).
2847 *
2848 * So, to avoid this, we instantiate an inode directly from the inode core we've
2849 * just recovered. We have the buffer still locked, and all we really need to
2850 * instantiate is the inode core and the forks being modified. We can do this
2851 * manually, then run the inode btree owner change, and then tear down the
2852 * xfs_inode without having to run any transactions at all.
2853 *
2854 * Also, because we don't have a transaction context available here but need to
2855 * gather all the buffers we modify for writeback so we pass the buffer_list
2856 * instead for the operation to use.
2857 */
2858
2859STATIC int
2860xfs_recover_inode_owner_change(
2861        struct xfs_mount        *mp,
2862        struct xfs_dinode       *dip,
2863        struct xfs_inode_log_format *in_f,
2864        struct list_head        *buffer_list)
2865{
2866        struct xfs_inode        *ip;
2867        int                     error;
2868
2869        ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2870
2871        ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2872        if (!ip)
2873                return -ENOMEM;
2874
2875        /* instantiate the inode */
2876        xfs_inode_from_disk(ip, dip);
2877        ASSERT(ip->i_d.di_version >= 3);
2878
2879        error = xfs_iformat_fork(ip, dip);
2880        if (error)
2881                goto out_free_ip;
2882
2883        if (!xfs_inode_verify_forks(ip)) {
2884                error = -EFSCORRUPTED;
2885                goto out_free_ip;
2886        }
2887
2888        if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2889                ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2890                error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2891                                              ip->i_ino, buffer_list);
2892                if (error)
2893                        goto out_free_ip;
2894        }
2895
2896        if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2897                ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2898                error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2899                                              ip->i_ino, buffer_list);
2900                if (error)
2901                        goto out_free_ip;
2902        }
2903
2904out_free_ip:
2905        xfs_inode_free(ip);
2906        return error;
2907}
2908
2909STATIC int
2910xlog_recover_inode_pass2(
2911        struct xlog                     *log,
2912        struct list_head                *buffer_list,
2913        struct xlog_recover_item        *item,
2914        xfs_lsn_t                       current_lsn)
2915{
2916        struct xfs_inode_log_format     *in_f;
2917        xfs_mount_t             *mp = log->l_mp;
2918        xfs_buf_t               *bp;
2919        xfs_dinode_t            *dip;
2920        int                     len;
2921        char                    *src;
2922        char                    *dest;
2923        int                     error;
2924        int                     attr_index;
2925        uint                    fields;
2926        struct xfs_log_dinode   *ldip;
2927        uint                    isize;
2928        int                     need_free = 0;
2929
2930        if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
2931                in_f = item->ri_buf[0].i_addr;
2932        } else {
2933                in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
2934                need_free = 1;
2935                error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2936                if (error)
2937                        goto error;
2938        }
2939
2940        /*
2941         * Inode buffers can be freed, look out for it,
2942         * and do not replay the inode.
2943         */
2944        if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2945                                        in_f->ilf_len, 0)) {
2946                error = 0;
2947                trace_xfs_log_recover_inode_cancel(log, in_f);
2948                goto error;
2949        }
2950        trace_xfs_log_recover_inode_recover(log, in_f);
2951
2952        bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2953                          &xfs_inode_buf_ops);
2954        if (!bp) {
2955                error = -ENOMEM;
2956                goto error;
2957        }
2958        error = bp->b_error;
2959        if (error) {
2960                xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2961                goto out_release;
2962        }
2963        ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2964        dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2965
2966        /*
2967         * Make sure the place we're flushing out to really looks
2968         * like an inode!
2969         */
2970        if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
2971                xfs_alert(mp,
2972        "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
2973                        __func__, dip, bp, in_f->ilf_ino);
2974                XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2975                                 XFS_ERRLEVEL_LOW, mp);
2976                error = -EFSCORRUPTED;
2977                goto out_release;
2978        }
2979        ldip = item->ri_buf[1].i_addr;
2980        if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
2981                xfs_alert(mp,
2982                        "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
2983                        __func__, item, in_f->ilf_ino);
2984                XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2985                                 XFS_ERRLEVEL_LOW, mp);
2986                error = -EFSCORRUPTED;
2987                goto out_release;
2988        }
2989
2990        /*
2991         * If the inode has an LSN in it, recover the inode only if it's less
2992         * than the lsn of the transaction we are replaying. Note: we still
2993         * need to replay an owner change even though the inode is more recent
2994         * than the transaction as there is no guarantee that all the btree
2995         * blocks are more recent than this transaction, too.
2996         */
2997        if (dip->di_version >= 3) {
2998                xfs_lsn_t       lsn = be64_to_cpu(dip->di_lsn);
2999
3000                if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3001                        trace_xfs_log_recover_inode_skip(log, in_f);
3002                        error = 0;
3003                        goto out_owner_change;
3004                }
3005        }
3006
3007        /*
3008         * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3009         * are transactional and if ordering is necessary we can determine that
3010         * more accurately by the LSN field in the V3 inode core. Don't trust
3011         * the inode versions we might be changing them here - use the
3012         * superblock flag to determine whether we need to look at di_flushiter
3013         * to skip replay when the on disk inode is newer than the log one
3014         */
3015        if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3016            ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3017                /*
3018                 * Deal with the wrap case, DI_MAX_FLUSH is less
3019                 * than smaller numbers
3020                 */
3021                if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3022                    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3023                        /* do nothing */
3024                } else {
3025                        trace_xfs_log_recover_inode_skip(log, in_f);
3026                        error = 0;
3027                        goto out_release;
3028                }
3029        }
3030
3031        /* Take the opportunity to reset the flush iteration count */
3032        ldip->di_flushiter = 0;
3033
3034        if (unlikely(S_ISREG(ldip->di_mode))) {
3035                if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3036                    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3037                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3038                                         XFS_ERRLEVEL_LOW, mp, ldip,
3039                                         sizeof(*ldip));
3040                        xfs_alert(mp,
3041                "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3042                "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3043                                __func__, item, dip, bp, in_f->ilf_ino);
3044                        error = -EFSCORRUPTED;
3045                        goto out_release;
3046                }
3047        } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3048                if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3049                    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3050                    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3051                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3052                                             XFS_ERRLEVEL_LOW, mp, ldip,
3053                                             sizeof(*ldip));
3054                        xfs_alert(mp,
3055                "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3056                "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3057                                __func__, item, dip, bp, in_f->ilf_ino);
3058                        error = -EFSCORRUPTED;
3059                        goto out_release;
3060                }
3061        }
3062        if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3063                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3064                                     XFS_ERRLEVEL_LOW, mp, ldip,
3065                                     sizeof(*ldip));
3066                xfs_alert(mp,
3067        "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3068        "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3069                        __func__, item, dip, bp, in_f->ilf_ino,
3070                        ldip->di_nextents + ldip->di_anextents,
3071                        ldip->di_nblocks);
3072                error = -EFSCORRUPTED;
3073                goto out_release;
3074        }
3075        if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3076                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3077                                     XFS_ERRLEVEL_LOW, mp, ldip,
3078                                     sizeof(*ldip));
3079                xfs_alert(mp,
3080        "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3081        "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3082                        item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3083                error = -EFSCORRUPTED;
3084                goto out_release;
3085        }
3086        isize = xfs_log_dinode_size(ldip->di_version);
3087        if (unlikely(item->ri_buf[1].i_len > isize)) {
3088                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3089                                     XFS_ERRLEVEL_LOW, mp, ldip,
3090                                     sizeof(*ldip));
3091                xfs_alert(mp,
3092                        "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3093                        __func__, item->ri_buf[1].i_len, item);
3094                error = -EFSCORRUPTED;
3095                goto out_release;
3096        }
3097
3098        /* recover the log dinode inode into the on disk inode */
3099        xfs_log_dinode_to_disk(ldip, dip);
3100
3101        fields = in_f->ilf_fields;
3102        if (fields & XFS_ILOG_DEV)
3103                xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3104
3105        if (in_f->ilf_size == 2)
3106                goto out_owner_change;
3107        len = item->ri_buf[2].i_len;
3108        src = item->ri_buf[2].i_addr;
3109        ASSERT(in_f->ilf_size <= 4);
3110        ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3111        ASSERT(!(fields & XFS_ILOG_DFORK) ||
3112               (len == in_f->ilf_dsize));
3113
3114        switch (fields & XFS_ILOG_DFORK) {
3115        case XFS_ILOG_DDATA:
3116        case XFS_ILOG_DEXT:
3117                memcpy(XFS_DFORK_DPTR(dip), src, len);
3118                break;
3119
3120        case XFS_ILOG_DBROOT:
3121                xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3122                                 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3123                                 XFS_DFORK_DSIZE(dip, mp));
3124                break;
3125
3126        default:
3127                /*
3128                 * There are no data fork flags set.
3129                 */
3130                ASSERT((fields & XFS_ILOG_DFORK) == 0);
3131                break;
3132        }
3133
3134        /*
3135         * If we logged any attribute data, recover it.  There may or
3136         * may not have been any other non-core data logged in this
3137         * transaction.
3138         */
3139        if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3140                if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3141                        attr_index = 3;
3142                } else {
3143                        attr_index = 2;
3144                }
3145                len = item->ri_buf[attr_index].i_len;
3146                src = item->ri_buf[attr_index].i_addr;
3147                ASSERT(len == in_f->ilf_asize);
3148
3149                switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3150                case XFS_ILOG_ADATA:
3151                case XFS_ILOG_AEXT:
3152                        dest = XFS_DFORK_APTR(dip);
3153                        ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3154                        memcpy(dest, src, len);
3155                        break;
3156
3157                case XFS_ILOG_ABROOT:
3158                        dest = XFS_DFORK_APTR(dip);
3159                        xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3160                                         len, (xfs_bmdr_block_t*)dest,
3161                                         XFS_DFORK_ASIZE(dip, mp));
3162                        break;
3163
3164                default:
3165                        xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3166                        ASSERT(0);
3167                        error = -EIO;
3168                        goto out_release;
3169                }
3170        }
3171
3172out_owner_change:
3173        /* Recover the swapext owner change unless inode has been deleted */
3174        if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3175            (dip->di_mode != 0))
3176                error = xfs_recover_inode_owner_change(mp, dip, in_f,
3177                                                       buffer_list);
3178        /* re-generate the checksum. */
3179        xfs_dinode_calc_crc(log->l_mp, dip);
3180
3181        ASSERT(bp->b_mount == mp);
3182        bp->b_iodone = xlog_recover_iodone;
3183        xfs_buf_delwri_queue(bp, buffer_list);
3184
3185out_release:
3186        xfs_buf_relse(bp);
3187error:
3188        if (need_free)
3189                kmem_free(in_f);
3190        return error;
3191}
3192
3193/*
3194 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3195 * structure, so that we know not to do any dquot item or dquot buffer recovery,
3196 * of that type.
3197 */
3198STATIC int
3199xlog_recover_quotaoff_pass1(
3200        struct xlog                     *log,
3201        struct xlog_recover_item        *item)
3202{
3203        xfs_qoff_logformat_t    *qoff_f = item->ri_buf[0].i_addr;
3204        ASSERT(qoff_f);
3205
3206        /*
3207         * The logitem format's flag tells us if this was user quotaoff,
3208         * group/project quotaoff or both.
3209         */
3210        if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3211                log->l_quotaoffs_flag |= XFS_DQ_USER;
3212        if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3213                log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3214        if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3215                log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3216
3217        return 0;
3218}
3219
3220/*
3221 * Recover a dquot record
3222 */
3223STATIC int
3224xlog_recover_dquot_pass2(
3225        struct xlog                     *log,
3226        struct list_head                *buffer_list,
3227        struct xlog_recover_item        *item,
3228        xfs_lsn_t                       current_lsn)
3229{
3230        xfs_mount_t             *mp = log->l_mp;
3231        xfs_buf_t               *bp;
3232        struct xfs_disk_dquot   *ddq, *recddq;
3233        xfs_failaddr_t          fa;
3234        int                     error;
3235        xfs_dq_logformat_t      *dq_f;
3236        uint                    type;
3237
3238
3239        /*
3240         * Filesystems are required to send in quota flags at mount time.
3241         */
3242        if (mp->m_qflags == 0)
3243                return 0;
3244
3245        recddq = item->ri_buf[1].i_addr;
3246        if (recddq == NULL) {
3247                xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3248                return -EIO;
3249        }
3250        if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3251                xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3252                        item->ri_buf[1].i_len, __func__);
3253                return -EIO;
3254        }
3255
3256        /*
3257         * This type of quotas was turned off, so ignore this record.
3258         */
3259        type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3260        ASSERT(type);
3261        if (log->l_quotaoffs_flag & type)
3262                return 0;
3263
3264        /*
3265         * At this point we know that quota was _not_ turned off.
3266         * Since the mount flags are not indicating to us otherwise, this
3267         * must mean that quota is on, and the dquot needs to be replayed.
3268         * Remember that we may not have fully recovered the superblock yet,
3269         * so we can't do the usual trick of looking at the SB quota bits.
3270         *
3271         * The other possibility, of course, is that the quota subsystem was
3272         * removed since the last mount - ENOSYS.
3273         */
3274        dq_f = item->ri_buf[0].i_addr;
3275        ASSERT(dq_f);
3276        fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3277        if (fa) {
3278                xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3279                                dq_f->qlf_id, fa);
3280                return -EIO;
3281        }
3282        ASSERT(dq_f->qlf_len == 1);
3283
3284        /*
3285         * At this point we are assuming that the dquots have been allocated
3286         * and hence the buffer has valid dquots stamped in it. It should,
3287         * therefore, pass verifier validation. If the dquot is bad, then the
3288         * we'll return an error here, so we don't need to specifically check
3289         * the dquot in the buffer after the verifier has run.
3290         */
3291        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3292                                   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3293                                   &xfs_dquot_buf_ops);
3294        if (error)
3295                return error;
3296
3297        ASSERT(bp);
3298        ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3299
3300        /*
3301         * If the dquot has an LSN in it, recover the dquot only if it's less
3302         * than the lsn of the transaction we are replaying.
3303         */
3304        if (xfs_sb_version_hascrc(&mp->m_sb)) {
3305                struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3306                xfs_lsn_t       lsn = be64_to_cpu(dqb->dd_lsn);
3307
3308                if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3309                        goto out_release;
3310                }
3311        }
3312
3313        memcpy(ddq, recddq, item->ri_buf[1].i_len);
3314        if (xfs_sb_version_hascrc(&mp->m_sb)) {
3315                xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3316                                 XFS_DQUOT_CRC_OFF);
3317        }
3318
3319        ASSERT(dq_f->qlf_size == 2);
3320        ASSERT(bp->b_mount == mp);
3321        bp->b_iodone = xlog_recover_iodone;
3322        xfs_buf_delwri_queue(bp, buffer_list);
3323
3324out_release:
3325        xfs_buf_relse(bp);
3326        return 0;
3327}
3328
3329/*
3330 * This routine is called to create an in-core extent free intent
3331 * item from the efi format structure which was logged on disk.
3332 * It allocates an in-core efi, copies the extents from the format
3333 * structure into it, and adds the efi to the AIL with the given
3334 * LSN.
3335 */
3336STATIC int
3337xlog_recover_efi_pass2(
3338        struct xlog                     *log,
3339        struct xlog_recover_item        *item,
3340        xfs_lsn_t                       lsn)
3341{
3342        int                             error;
3343        struct xfs_mount                *mp = log->l_mp;
3344        struct xfs_efi_log_item         *efip;
3345        struct xfs_efi_log_format       *efi_formatp;
3346
3347        efi_formatp = item->ri_buf[0].i_addr;
3348
3349        efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3350        error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3351        if (error) {
3352                xfs_efi_item_free(efip);
3353                return error;
3354        }
3355        atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3356
3357        spin_lock(&log->l_ailp->ail_lock);
3358        /*
3359         * The EFI has two references. One for the EFD and one for EFI to ensure
3360         * it makes it into the AIL. Insert the EFI into the AIL directly and
3361         * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3362         * AIL lock.
3363         */
3364        xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3365        xfs_efi_release(efip);
3366        return 0;
3367}
3368
3369
3370/*
3371 * This routine is called when an EFD format structure is found in a committed
3372 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3373 * was still in the log. To do this it searches the AIL for the EFI with an id
3374 * equal to that in the EFD format structure. If we find it we drop the EFD
3375 * reference, which removes the EFI from the AIL and frees it.
3376 */
3377STATIC int
3378xlog_recover_efd_pass2(
3379        struct xlog                     *log,
3380        struct xlog_recover_item        *item)
3381{
3382        xfs_efd_log_format_t    *efd_formatp;
3383        xfs_efi_log_item_t      *efip = NULL;
3384        struct xfs_log_item     *lip;
3385        uint64_t                efi_id;
3386        struct xfs_ail_cursor   cur;
3387        struct xfs_ail          *ailp = log->l_ailp;
3388
3389        efd_formatp = item->ri_buf[0].i_addr;
3390        ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3391                ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3392               (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3393                ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3394        efi_id = efd_formatp->efd_efi_id;
3395
3396        /*
3397         * Search for the EFI with the id in the EFD format structure in the
3398         * AIL.
3399         */
3400        spin_lock(&ailp->ail_lock);
3401        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3402        while (lip != NULL) {
3403                if (lip->li_type == XFS_LI_EFI) {
3404                        efip = (xfs_efi_log_item_t *)lip;
3405                        if (efip->efi_format.efi_id == efi_id) {
3406                                /*
3407                                 * Drop the EFD reference to the EFI. This
3408                                 * removes the EFI from the AIL and frees it.
3409                                 */
3410                                spin_unlock(&ailp->ail_lock);
3411                                xfs_efi_release(efip);
3412                                spin_lock(&ailp->ail_lock);
3413                                break;
3414                        }
3415                }
3416                lip = xfs_trans_ail_cursor_next(ailp, &cur);
3417        }
3418
3419        xfs_trans_ail_cursor_done(&cur);
3420        spin_unlock(&ailp->ail_lock);
3421
3422        return 0;
3423}
3424
3425/*
3426 * This routine is called to create an in-core extent rmap update
3427 * item from the rui format structure which was logged on disk.
3428 * It allocates an in-core rui, copies the extents from the format
3429 * structure into it, and adds the rui to the AIL with the given
3430 * LSN.
3431 */
3432STATIC int
3433xlog_recover_rui_pass2(
3434        struct xlog                     *log,
3435        struct xlog_recover_item        *item,
3436        xfs_lsn_t                       lsn)
3437{
3438        int                             error;
3439        struct xfs_mount                *mp = log->l_mp;
3440        struct xfs_rui_log_item         *ruip;
3441        struct xfs_rui_log_format       *rui_formatp;
3442
3443        rui_formatp = item->ri_buf[0].i_addr;
3444
3445        ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3446        error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3447        if (error) {
3448                xfs_rui_item_free(ruip);
3449                return error;
3450        }
3451        atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3452
3453        spin_lock(&log->l_ailp->ail_lock);
3454        /*
3455         * The RUI has two references. One for the RUD and one for RUI to ensure
3456         * it makes it into the AIL. Insert the RUI into the AIL directly and
3457         * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3458         * AIL lock.
3459         */
3460        xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3461        xfs_rui_release(ruip);
3462        return 0;
3463}
3464
3465
3466/*
3467 * This routine is called when an RUD format structure is found in a committed
3468 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3469 * was still in the log. To do this it searches the AIL for the RUI with an id
3470 * equal to that in the RUD format structure. If we find it we drop the RUD
3471 * reference, which removes the RUI from the AIL and frees it.
3472 */
3473STATIC int
3474xlog_recover_rud_pass2(
3475        struct xlog                     *log,
3476        struct xlog_recover_item        *item)
3477{
3478        struct xfs_rud_log_format       *rud_formatp;
3479        struct xfs_rui_log_item         *ruip = NULL;
3480        struct xfs_log_item             *lip;
3481        uint64_t                        rui_id;
3482        struct xfs_ail_cursor           cur;
3483        struct xfs_ail                  *ailp = log->l_ailp;
3484
3485        rud_formatp = item->ri_buf[0].i_addr;
3486        ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3487        rui_id = rud_formatp->rud_rui_id;
3488
3489        /*
3490         * Search for the RUI with the id in the RUD format structure in the
3491         * AIL.
3492         */
3493        spin_lock(&ailp->ail_lock);
3494        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3495        while (lip != NULL) {
3496                if (lip->li_type == XFS_LI_RUI) {
3497                        ruip = (struct xfs_rui_log_item *)lip;
3498                        if (ruip->rui_format.rui_id == rui_id) {
3499                                /*
3500                                 * Drop the RUD reference to the RUI. This
3501                                 * removes the RUI from the AIL and frees it.
3502                                 */
3503                                spin_unlock(&ailp->ail_lock);
3504                                xfs_rui_release(ruip);
3505                                spin_lock(&ailp->ail_lock);
3506                                break;
3507                        }
3508                }
3509                lip = xfs_trans_ail_cursor_next(ailp, &cur);
3510        }
3511
3512        xfs_trans_ail_cursor_done(&cur);
3513        spin_unlock(&ailp->ail_lock);
3514
3515        return 0;
3516}
3517
3518/*
3519 * Copy an CUI format buffer from the given buf, and into the destination
3520 * CUI format structure.  The CUI/CUD items were designed not to need any
3521 * special alignment handling.
3522 */
3523static int
3524xfs_cui_copy_format(
3525        struct xfs_log_iovec            *buf,
3526        struct xfs_cui_log_format       *dst_cui_fmt)
3527{
3528        struct xfs_cui_log_format       *src_cui_fmt;
3529        uint                            len;
3530
3531        src_cui_fmt = buf->i_addr;
3532        len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3533
3534        if (buf->i_len == len) {
3535                memcpy(dst_cui_fmt, src_cui_fmt, len);
3536                return 0;
3537        }
3538        return -EFSCORRUPTED;
3539}
3540
3541/*
3542 * This routine is called to create an in-core extent refcount update
3543 * item from the cui format structure which was logged on disk.
3544 * It allocates an in-core cui, copies the extents from the format
3545 * structure into it, and adds the cui to the AIL with the given
3546 * LSN.
3547 */
3548STATIC int
3549xlog_recover_cui_pass2(
3550        struct xlog                     *log,
3551        struct xlog_recover_item        *item,
3552        xfs_lsn_t                       lsn)
3553{
3554        int                             error;
3555        struct xfs_mount                *mp = log->l_mp;
3556        struct xfs_cui_log_item         *cuip;
3557        struct xfs_cui_log_format       *cui_formatp;
3558
3559        cui_formatp = item->ri_buf[0].i_addr;
3560
3561        cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3562        error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3563        if (error) {
3564                xfs_cui_item_free(cuip);
3565                return error;
3566        }
3567        atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3568
3569        spin_lock(&log->l_ailp->ail_lock);
3570        /*
3571         * The CUI has two references. One for the CUD and one for CUI to ensure
3572         * it makes it into the AIL. Insert the CUI into the AIL directly and
3573         * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3574         * AIL lock.
3575         */
3576        xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3577        xfs_cui_release(cuip);
3578        return 0;
3579}
3580
3581
3582/*
3583 * This routine is called when an CUD format structure is found in a committed
3584 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3585 * was still in the log. To do this it searches the AIL for the CUI with an id
3586 * equal to that in the CUD format structure. If we find it we drop the CUD
3587 * reference, which removes the CUI from the AIL and frees it.
3588 */
3589STATIC int
3590xlog_recover_cud_pass2(
3591        struct xlog                     *log,
3592        struct xlog_recover_item        *item)
3593{
3594        struct xfs_cud_log_format       *cud_formatp;
3595        struct xfs_cui_log_item         *cuip = NULL;
3596        struct xfs_log_item             *lip;
3597        uint64_t                        cui_id;
3598        struct xfs_ail_cursor           cur;
3599        struct xfs_ail                  *ailp = log->l_ailp;
3600
3601        cud_formatp = item->ri_buf[0].i_addr;
3602        if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3603                return -EFSCORRUPTED;
3604        cui_id = cud_formatp->cud_cui_id;
3605
3606        /*
3607         * Search for the CUI with the id in the CUD format structure in the
3608         * AIL.
3609         */
3610        spin_lock(&ailp->ail_lock);
3611        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3612        while (lip != NULL) {
3613                if (lip->li_type == XFS_LI_CUI) {
3614                        cuip = (struct xfs_cui_log_item *)lip;
3615                        if (cuip->cui_format.cui_id == cui_id) {
3616                                /*
3617                                 * Drop the CUD reference to the CUI. This
3618                                 * removes the CUI from the AIL and frees it.
3619                                 */
3620                                spin_unlock(&ailp->ail_lock);
3621                                xfs_cui_release(cuip);
3622                                spin_lock(&ailp->ail_lock);
3623                                break;
3624                        }
3625                }
3626                lip = xfs_trans_ail_cursor_next(ailp, &cur);
3627        }
3628
3629        xfs_trans_ail_cursor_done(&cur);
3630        spin_unlock(&ailp->ail_lock);
3631
3632        return 0;
3633}
3634
3635/*
3636 * Copy an BUI format buffer from the given buf, and into the destination
3637 * BUI format structure.  The BUI/BUD items were designed not to need any
3638 * special alignment handling.
3639 */
3640static int
3641xfs_bui_copy_format(
3642        struct xfs_log_iovec            *buf,
3643        struct xfs_bui_log_format       *dst_bui_fmt)
3644{
3645        struct xfs_bui_log_format       *src_bui_fmt;
3646        uint                            len;
3647
3648        src_bui_fmt = buf->i_addr;
3649        len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3650
3651        if (buf->i_len == len) {
3652                memcpy(dst_bui_fmt, src_bui_fmt, len);
3653                return 0;
3654        }
3655        return -EFSCORRUPTED;
3656}
3657
3658/*
3659 * This routine is called to create an in-core extent bmap update
3660 * item from the bui format structure which was logged on disk.
3661 * It allocates an in-core bui, copies the extents from the format
3662 * structure into it, and adds the bui to the AIL with the given
3663 * LSN.
3664 */
3665STATIC int
3666xlog_recover_bui_pass2(
3667        struct xlog                     *log,
3668        struct xlog_recover_item        *item,
3669        xfs_lsn_t                       lsn)
3670{
3671        int                             error;
3672        struct xfs_mount                *mp = log->l_mp;
3673        struct xfs_bui_log_item         *buip;
3674        struct xfs_bui_log_format       *bui_formatp;
3675
3676        bui_formatp = item->ri_buf[0].i_addr;
3677
3678        if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3679                return -EFSCORRUPTED;
3680        buip = xfs_bui_init(mp);
3681        error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3682        if (error) {
3683                xfs_bui_item_free(buip);
3684                return error;
3685        }
3686        atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3687
3688        spin_lock(&log->l_ailp->ail_lock);
3689        /*
3690         * The RUI has two references. One for the RUD and one for RUI to ensure
3691         * it makes it into the AIL. Insert the RUI into the AIL directly and
3692         * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3693         * AIL lock.
3694         */
3695        xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3696        xfs_bui_release(buip);
3697        return 0;
3698}
3699
3700
3701/*
3702 * This routine is called when an BUD format structure is found in a committed
3703 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3704 * was still in the log. To do this it searches the AIL for the BUI with an id
3705 * equal to that in the BUD format structure. If we find it we drop the BUD
3706 * reference, which removes the BUI from the AIL and frees it.
3707 */
3708STATIC int
3709xlog_recover_bud_pass2(
3710        struct xlog                     *log,
3711        struct xlog_recover_item        *item)
3712{
3713        struct xfs_bud_log_format       *bud_formatp;
3714        struct xfs_bui_log_item         *buip = NULL;
3715        struct xfs_log_item             *lip;
3716        uint64_t                        bui_id;
3717        struct xfs_ail_cursor           cur;
3718        struct xfs_ail                  *ailp = log->l_ailp;
3719
3720        bud_formatp = item->ri_buf[0].i_addr;
3721        if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3722                return -EFSCORRUPTED;
3723        bui_id = bud_formatp->bud_bui_id;
3724
3725        /*
3726         * Search for the BUI with the id in the BUD format structure in the
3727         * AIL.
3728         */
3729        spin_lock(&ailp->ail_lock);
3730        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3731        while (lip != NULL) {
3732                if (lip->li_type == XFS_LI_BUI) {
3733                        buip = (struct xfs_bui_log_item *)lip;
3734                        if (buip->bui_format.bui_id == bui_id) {
3735                                /*
3736                                 * Drop the BUD reference to the BUI. This
3737                                 * removes the BUI from the AIL and frees it.
3738                                 */
3739                                spin_unlock(&ailp->ail_lock);
3740                                xfs_bui_release(buip);
3741                                spin_lock(&ailp->ail_lock);
3742                                break;
3743                        }
3744                }
3745                lip = xfs_trans_ail_cursor_next(ailp, &cur);
3746        }
3747
3748        xfs_trans_ail_cursor_done(&cur);
3749        spin_unlock(&ailp->ail_lock);
3750
3751        return 0;
3752}
3753
3754/*
3755 * This routine is called when an inode create format structure is found in a
3756 * committed transaction in the log.  It's purpose is to initialise the inodes
3757 * being allocated on disk. This requires us to get inode cluster buffers that
3758 * match the range to be initialised, stamped with inode templates and written
3759 * by delayed write so that subsequent modifications will hit the cached buffer
3760 * and only need writing out at the end of recovery.
3761 */
3762STATIC int
3763xlog_recover_do_icreate_pass2(
3764        struct xlog             *log,
3765        struct list_head        *buffer_list,
3766        xlog_recover_item_t     *item)
3767{
3768        struct xfs_mount        *mp = log->l_mp;
3769        struct xfs_icreate_log  *icl;
3770        struct xfs_ino_geometry *igeo = M_IGEO(mp);
3771        xfs_agnumber_t          agno;
3772        xfs_agblock_t           agbno;
3773        unsigned int            count;
3774        unsigned int            isize;
3775        xfs_agblock_t           length;
3776        int                     bb_per_cluster;
3777        int                     cancel_count;
3778        int                     nbufs;
3779        int                     i;
3780
3781        icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3782        if (icl->icl_type != XFS_LI_ICREATE) {
3783                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3784                return -EINVAL;
3785        }
3786
3787        if (icl->icl_size != 1) {
3788                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3789                return -EINVAL;
3790        }
3791
3792        agno = be32_to_cpu(icl->icl_ag);
3793        if (agno >= mp->m_sb.sb_agcount) {
3794                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3795                return -EINVAL;
3796        }
3797        agbno = be32_to_cpu(icl->icl_agbno);
3798        if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3799                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3800                return -EINVAL;
3801        }
3802        isize = be32_to_cpu(icl->icl_isize);
3803        if (isize != mp->m_sb.sb_inodesize) {
3804                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3805                return -EINVAL;
3806        }
3807        count = be32_to_cpu(icl->icl_count);
3808        if (!count) {
3809                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3810                return -EINVAL;
3811        }
3812        length = be32_to_cpu(icl->icl_length);
3813        if (!length || length >= mp->m_sb.sb_agblocks) {
3814                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3815                return -EINVAL;
3816        }
3817
3818        /*
3819         * The inode chunk is either full or sparse and we only support
3820         * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
3821         */
3822        if (length != igeo->ialloc_blks &&
3823            length != igeo->ialloc_min_blks) {
3824                xfs_warn(log->l_mp,
3825                         "%s: unsupported chunk length", __FUNCTION__);
3826                return -EINVAL;
3827        }
3828
3829        /* verify inode count is consistent with extent length */
3830        if ((count >> mp->m_sb.sb_inopblog) != length) {
3831                xfs_warn(log->l_mp,
3832                         "%s: inconsistent inode count and chunk length",
3833                         __FUNCTION__);
3834                return -EINVAL;
3835        }
3836
3837        /*
3838         * The icreate transaction can cover multiple cluster buffers and these
3839         * buffers could have been freed and reused. Check the individual
3840         * buffers for cancellation so we don't overwrite anything written after
3841         * a cancellation.
3842         */
3843        bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
3844        nbufs = length / igeo->blocks_per_cluster;
3845        for (i = 0, cancel_count = 0; i < nbufs; i++) {
3846                xfs_daddr_t     daddr;
3847
3848                daddr = XFS_AGB_TO_DADDR(mp, agno,
3849                                agbno + i * igeo->blocks_per_cluster);
3850                if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3851                        cancel_count++;
3852        }
3853
3854        /*
3855         * We currently only use icreate for a single allocation at a time. This
3856         * means we should expect either all or none of the buffers to be
3857         * cancelled. Be conservative and skip replay if at least one buffer is
3858         * cancelled, but warn the user that something is awry if the buffers
3859         * are not consistent.
3860         *
3861         * XXX: This must be refined to only skip cancelled clusters once we use
3862         * icreate for multiple chunk allocations.
3863         */
3864        ASSERT(!cancel_count || cancel_count == nbufs);
3865        if (cancel_count) {
3866                if (cancel_count != nbufs)
3867                        xfs_warn(mp,
3868        "WARNING: partial inode chunk cancellation, skipped icreate.");
3869                trace_xfs_log_recover_icreate_cancel(log, icl);
3870                return 0;
3871        }
3872
3873        trace_xfs_log_recover_icreate_recover(log, icl);
3874        return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3875                                     length, be32_to_cpu(icl->icl_gen));
3876}
3877
3878STATIC void
3879xlog_recover_buffer_ra_pass2(
3880        struct xlog                     *log,
3881        struct xlog_recover_item        *item)
3882{
3883        struct xfs_buf_log_format       *buf_f = item->ri_buf[0].i_addr;
3884        struct xfs_mount                *mp = log->l_mp;
3885
3886        if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3887                        buf_f->blf_len, buf_f->blf_flags)) {
3888                return;
3889        }
3890
3891        xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3892                                buf_f->blf_len, NULL);
3893}
3894
3895STATIC void
3896xlog_recover_inode_ra_pass2(
3897        struct xlog                     *log,
3898        struct xlog_recover_item        *item)
3899{
3900        struct xfs_inode_log_format     ilf_buf;
3901        struct xfs_inode_log_format     *ilfp;
3902        struct xfs_mount                *mp = log->l_mp;
3903        int                     error;
3904
3905        if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3906                ilfp = item->ri_buf[0].i_addr;
3907        } else {
3908                ilfp = &ilf_buf;
3909                memset(ilfp, 0, sizeof(*ilfp));
3910                error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3911                if (error)
3912                        return;
3913        }
3914
3915        if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3916                return;
3917
3918        xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3919                                ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3920}
3921
3922STATIC void
3923xlog_recover_dquot_ra_pass2(
3924        struct xlog                     *log,
3925        struct xlog_recover_item        *item)
3926{
3927        struct xfs_mount        *mp = log->l_mp;
3928        struct xfs_disk_dquot   *recddq;
3929        struct xfs_dq_logformat *dq_f;
3930        uint                    type;
3931        int                     len;
3932
3933
3934        if (mp->m_qflags == 0)
3935                return;
3936
3937        recddq = item->ri_buf[1].i_addr;
3938        if (recddq == NULL)
3939                return;
3940        if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3941                return;
3942
3943        type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3944        ASSERT(type);
3945        if (log->l_quotaoffs_flag & type)
3946                return;
3947
3948        dq_f = item->ri_buf[0].i_addr;
3949        ASSERT(dq_f);
3950        ASSERT(dq_f->qlf_len == 1);
3951
3952        len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3953        if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3954                return;
3955
3956        xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3957                          &xfs_dquot_buf_ra_ops);
3958}
3959
3960STATIC void
3961xlog_recover_ra_pass2(
3962        struct xlog                     *log,
3963        struct xlog_recover_item        *item)
3964{
3965        switch (ITEM_TYPE(item)) {
3966        case XFS_LI_BUF:
3967                xlog_recover_buffer_ra_pass2(log, item);
3968                break;
3969        case XFS_LI_INODE:
3970                xlog_recover_inode_ra_pass2(log, item);
3971                break;
3972        case XFS_LI_DQUOT:
3973                xlog_recover_dquot_ra_pass2(log, item);
3974                break;
3975        case XFS_LI_EFI:
3976        case XFS_LI_EFD:
3977        case XFS_LI_QUOTAOFF:
3978        case XFS_LI_RUI:
3979        case XFS_LI_RUD:
3980        case XFS_LI_CUI:
3981        case XFS_LI_CUD:
3982        case XFS_LI_BUI:
3983        case XFS_LI_BUD:
3984        default:
3985                break;
3986        }
3987}
3988
3989STATIC int
3990xlog_recover_commit_pass1(
3991        struct xlog                     *log,
3992        struct xlog_recover             *trans,
3993        struct xlog_recover_item        *item)
3994{
3995        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3996
3997        switch (ITEM_TYPE(item)) {
3998        case XFS_LI_BUF:
3999                return xlog_recover_buffer_pass1(log, item);
4000        case XFS_LI_QUOTAOFF:
4001                return xlog_recover_quotaoff_pass1(log, item);
4002        case XFS_LI_INODE:
4003        case XFS_LI_EFI:
4004        case XFS_LI_EFD:
4005        case XFS_LI_DQUOT:
4006        case XFS_LI_ICREATE:
4007        case XFS_LI_RUI:
4008        case XFS_LI_RUD:
4009        case XFS_LI_CUI:
4010        case XFS_LI_CUD:
4011        case XFS_LI_BUI:
4012        case XFS_LI_BUD:
4013                /* nothing to do in pass 1 */
4014                return 0;
4015        default:
4016                xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4017                        __func__, ITEM_TYPE(item));
4018                ASSERT(0);
4019                return -EIO;
4020        }
4021}
4022
4023STATIC int
4024xlog_recover_commit_pass2(
4025        struct xlog                     *log,
4026        struct xlog_recover             *trans,
4027        struct list_head                *buffer_list,
4028        struct xlog_recover_item        *item)
4029{
4030        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4031
4032        switch (ITEM_TYPE(item)) {
4033        case XFS_LI_BUF:
4034                return xlog_recover_buffer_pass2(log, buffer_list, item,
4035                                                 trans->r_lsn);
4036        case XFS_LI_INODE:
4037                return xlog_recover_inode_pass2(log, buffer_list, item,
4038                                                 trans->r_lsn);
4039        case XFS_LI_EFI:
4040                return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4041        case XFS_LI_EFD:
4042                return xlog_recover_efd_pass2(log, item);
4043        case XFS_LI_RUI:
4044                return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4045        case XFS_LI_RUD:
4046                return xlog_recover_rud_pass2(log, item);
4047        case XFS_LI_CUI:
4048                return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4049        case XFS_LI_CUD:
4050                return xlog_recover_cud_pass2(log, item);
4051        case XFS_LI_BUI:
4052                return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4053        case XFS_LI_BUD:
4054                return xlog_recover_bud_pass2(log, item);
4055        case XFS_LI_DQUOT:
4056                return xlog_recover_dquot_pass2(log, buffer_list, item,
4057                                                trans->r_lsn);
4058        case XFS_LI_ICREATE:
4059                return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4060        case XFS_LI_QUOTAOFF:
4061                /* nothing to do in pass2 */
4062                return 0;
4063        default:
4064                xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4065                        __func__, ITEM_TYPE(item));
4066                ASSERT(0);
4067                return -EIO;
4068        }
4069}
4070
4071STATIC int
4072xlog_recover_items_pass2(
4073        struct xlog                     *log,
4074        struct xlog_recover             *trans,
4075        struct list_head                *buffer_list,
4076        struct list_head                *item_list)
4077{
4078        struct xlog_recover_item        *item;
4079        int                             error = 0;
4080
4081        list_for_each_entry(item, item_list, ri_list) {
4082                error = xlog_recover_commit_pass2(log, trans,
4083                                          buffer_list, item);
4084                if (error)
4085                        return error;
4086        }
4087
4088        return error;
4089}
4090
4091/*
4092 * Perform the transaction.
4093 *
4094 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
4095 * EFIs and EFDs get queued up by adding entries into the AIL for them.
4096 */
4097STATIC int
4098xlog_recover_commit_trans(
4099        struct xlog             *log,
4100        struct xlog_recover     *trans,
4101        int                     pass,
4102        struct list_head        *buffer_list)
4103{
4104        int                             error = 0;
4105        int                             items_queued = 0;
4106        struct xlog_recover_item        *item;
4107        struct xlog_recover_item        *next;
4108        LIST_HEAD                       (ra_list);
4109        LIST_HEAD                       (done_list);
4110
4111        #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4112
4113        hlist_del_init(&trans->r_list);
4114
4115        error = xlog_recover_reorder_trans(log, trans, pass);
4116        if (error)
4117                return error;
4118
4119        list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4120                switch (pass) {
4121                case XLOG_RECOVER_PASS1:
4122                        error = xlog_recover_commit_pass1(log, trans, item);
4123                        break;
4124                case XLOG_RECOVER_PASS2:
4125                        xlog_recover_ra_pass2(log, item);
4126                        list_move_tail(&item->ri_list, &ra_list);
4127                        items_queued++;
4128                        if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4129                                error = xlog_recover_items_pass2(log, trans,
4130                                                buffer_list, &ra_list);
4131                                list_splice_tail_init(&ra_list, &done_list);
4132                                items_queued = 0;
4133                        }
4134
4135                        break;
4136                default:
4137                        ASSERT(0);
4138                }
4139
4140                if (error)
4141                        goto out;
4142        }
4143
4144out:
4145        if (!list_empty(&ra_list)) {
4146                if (!error)
4147                        error = xlog_recover_items_pass2(log, trans,
4148                                        buffer_list, &ra_list);
4149                list_splice_tail_init(&ra_list, &done_list);
4150        }
4151
4152        if (!list_empty(&done_list))
4153                list_splice_init(&done_list, &trans->r_itemq);
4154
4155        return error;
4156}
4157
4158STATIC void
4159xlog_recover_add_item(
4160        struct list_head        *head)
4161{
4162        xlog_recover_item_t     *item;
4163
4164        item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4165        INIT_LIST_HEAD(&item->ri_list);
4166        list_add_tail(&item->ri_list, head);
4167}
4168
4169STATIC int
4170xlog_recover_add_to_cont_trans(
4171        struct xlog             *log,
4172        struct xlog_recover     *trans,
4173        char                    *dp,
4174        int                     len)
4175{
4176        xlog_recover_item_t     *item;
4177        char                    *ptr, *old_ptr;
4178        int                     old_len;
4179
4180        /*
4181         * If the transaction is empty, the header was split across this and the
4182         * previous record. Copy the rest of the header.
4183         */
4184        if (list_empty(&trans->r_itemq)) {
4185                ASSERT(len <= sizeof(struct xfs_trans_header));
4186                if (len > sizeof(struct xfs_trans_header)) {
4187                        xfs_warn(log->l_mp, "%s: bad header length", __func__);
4188                        return -EIO;
4189                }
4190
4191                xlog_recover_add_item(&trans->r_itemq);
4192                ptr = (char *)&trans->r_theader +
4193                                sizeof(struct xfs_trans_header) - len;
4194                memcpy(ptr, dp, len);
4195                return 0;
4196        }
4197
4198        /* take the tail entry */
4199        item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4200
4201        old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4202        old_len = item->ri_buf[item->ri_cnt-1].i_len;
4203
4204        ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4205        memcpy(&ptr[old_len], dp, len);
4206        item->ri_buf[item->ri_cnt-1].i_len += len;
4207        item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4208        trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4209        return 0;
4210}
4211
4212/*
4213 * The next region to add is the start of a new region.  It could be
4214 * a whole region or it could be the first part of a new region.  Because
4215 * of this, the assumption here is that the type and size fields of all
4216 * format structures fit into the first 32 bits of the structure.
4217 *
4218 * This works because all regions must be 32 bit aligned.  Therefore, we
4219 * either have both fields or we have neither field.  In the case we have
4220 * neither field, the data part of the region is zero length.  We only have
4221 * a log_op_header and can throw away the header since a new one will appear
4222 * later.  If we have at least 4 bytes, then we can determine how many regions
4223 * will appear in the current log item.
4224 */
4225STATIC int
4226xlog_recover_add_to_trans(
4227        struct xlog             *log,
4228        struct xlog_recover     *trans,
4229        char                    *dp,
4230        int                     len)
4231{
4232        struct xfs_inode_log_format     *in_f;                  /* any will do */
4233        xlog_recover_item_t     *item;
4234        char                    *ptr;
4235
4236        if (!len)
4237                return 0;
4238        if (list_empty(&trans->r_itemq)) {
4239                /* we need to catch log corruptions here */
4240                if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4241                        xfs_warn(log->l_mp, "%s: bad header magic number",
4242                                __func__);
4243                        ASSERT(0);
4244                        return -EIO;
4245                }
4246
4247                if (len > sizeof(struct xfs_trans_header)) {
4248                        xfs_warn(log->l_mp, "%s: bad header length", __func__);
4249                        ASSERT(0);
4250                        return -EIO;
4251                }
4252
4253                /*
4254                 * The transaction header can be arbitrarily split across op
4255                 * records. If we don't have the whole thing here, copy what we
4256                 * do have and handle the rest in the next record.
4257                 */
4258                if (len == sizeof(struct xfs_trans_header))
4259                        xlog_recover_add_item(&trans->r_itemq);
4260                memcpy(&trans->r_theader, dp, len);
4261                return 0;
4262        }
4263
4264        ptr = kmem_alloc(len, KM_SLEEP);
4265        memcpy(ptr, dp, len);
4266        in_f = (struct xfs_inode_log_format *)ptr;
4267
4268        /* take the tail entry */
4269        item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4270        if (item->ri_total != 0 &&
4271             item->ri_total == item->ri_cnt) {
4272                /* tail item is in use, get a new one */
4273                xlog_recover_add_item(&trans->r_itemq);
4274                item = list_entry(trans->r_itemq.prev,
4275                                        xlog_recover_item_t, ri_list);
4276        }
4277
4278        if (item->ri_total == 0) {              /* first region to be added */
4279                if (in_f->ilf_size == 0 ||
4280                    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4281                        xfs_warn(log->l_mp,
4282                "bad number of regions (%d) in inode log format",
4283                                  in_f->ilf_size);
4284                        ASSERT(0);
4285                        kmem_free(ptr);
4286                        return -EIO;
4287                }
4288
4289                item->ri_total = in_f->ilf_size;
4290                item->ri_buf =
4291                        kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4292                                    KM_SLEEP);
4293        }
4294        ASSERT(item->ri_total > item->ri_cnt);
4295        /* Description region is ri_buf[0] */
4296        item->ri_buf[item->ri_cnt].i_addr = ptr;
4297        item->ri_buf[item->ri_cnt].i_len  = len;
4298        item->ri_cnt++;
4299        trace_xfs_log_recover_item_add(log, trans, item, 0);
4300        return 0;
4301}
4302
4303/*
4304 * Free up any resources allocated by the transaction
4305 *
4306 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4307 */
4308STATIC void
4309xlog_recover_free_trans(
4310        struct xlog_recover     *trans)
4311{
4312        xlog_recover_item_t     *item, *n;
4313        int                     i;
4314
4315        hlist_del_init(&trans->r_list);
4316
4317        list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4318                /* Free the regions in the item. */
4319                list_del(&item->ri_list);
4320                for (i = 0; i < item->ri_cnt; i++)
4321                        kmem_free(item->ri_buf[i].i_addr);
4322                /* Free the item itself */
4323                kmem_free(item->ri_buf);
4324                kmem_free(item);
4325        }
4326        /* Free the transaction recover structure */
4327        kmem_free(trans);
4328}
4329
4330/*
4331 * On error or completion, trans is freed.
4332 */
4333STATIC int
4334xlog_recovery_process_trans(
4335        struct xlog             *log,
4336        struct xlog_recover     *trans,
4337        char                    *dp,
4338        unsigned int            len,
4339        unsigned int            flags,
4340        int                     pass,
4341        struct list_head        *buffer_list)
4342{
4343        int                     error = 0;
4344        bool                    freeit = false;
4345
4346        /* mask off ophdr transaction container flags */
4347        flags &= ~XLOG_END_TRANS;
4348        if (flags & XLOG_WAS_CONT_TRANS)
4349                flags &= ~XLOG_CONTINUE_TRANS;
4350
4351        /*
4352         * Callees must not free the trans structure. We'll decide if we need to
4353         * free it or not based on the operation being done and it's result.
4354         */
4355        switch (flags) {
4356        /* expected flag values */
4357        case 0:
4358        case XLOG_CONTINUE_TRANS:
4359                error = xlog_recover_add_to_trans(log, trans, dp, len);
4360                break;
4361        case XLOG_WAS_CONT_TRANS:
4362                error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4363                break;
4364        case XLOG_COMMIT_TRANS:
4365                error = xlog_recover_commit_trans(log, trans, pass,
4366                                                  buffer_list);
4367                /* success or fail, we are now done with this transaction. */
4368                freeit = true;
4369                break;
4370
4371        /* unexpected flag values */
4372        case XLOG_UNMOUNT_TRANS:
4373                /* just skip trans */
4374                xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4375                freeit = true;
4376                break;
4377        case XLOG_START_TRANS:
4378        default:
4379                xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4380                ASSERT(0);
4381                error = -EIO;
4382                break;
4383        }
4384        if (error || freeit)
4385                xlog_recover_free_trans(trans);
4386        return error;
4387}
4388
4389/*
4390 * Lookup the transaction recovery structure associated with the ID in the
4391 * current ophdr. If the transaction doesn't exist and the start flag is set in
4392 * the ophdr, then allocate a new transaction for future ID matches to find.
4393 * Either way, return what we found during the lookup - an existing transaction
4394 * or nothing.
4395 */
4396STATIC struct xlog_recover *
4397xlog_recover_ophdr_to_trans(
4398        struct hlist_head       rhash[],
4399        struct xlog_rec_header  *rhead,
4400        struct xlog_op_header   *ohead)
4401{
4402        struct xlog_recover     *trans;
4403        xlog_tid_t              tid;
4404        struct hlist_head       *rhp;
4405
4406        tid = be32_to_cpu(ohead->oh_tid);
4407        rhp = &rhash[XLOG_RHASH(tid)];
4408        hlist_for_each_entry(trans, rhp, r_list) {
4409                if (trans->r_log_tid == tid)
4410                        return trans;
4411        }
4412
4413        /*
4414         * skip over non-start transaction headers - we could be
4415         * processing slack space before the next transaction starts
4416         */
4417        if (!(ohead->oh_flags & XLOG_START_TRANS))
4418                return NULL;
4419
4420        ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4421
4422        /*
4423         * This is a new transaction so allocate a new recovery container to
4424         * hold the recovery ops that will follow.
4425         */
4426        trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4427        trans->r_log_tid = tid;
4428        trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4429        INIT_LIST_HEAD(&trans->r_itemq);
4430        INIT_HLIST_NODE(&trans->r_list);
4431        hlist_add_head(&trans->r_list, rhp);
4432
4433        /*
4434         * Nothing more to do for this ophdr. Items to be added to this new
4435         * transaction will be in subsequent ophdr containers.
4436         */
4437        return NULL;
4438}
4439
4440STATIC int
4441xlog_recover_process_ophdr(
4442        struct xlog             *log,
4443        struct hlist_head       rhash[],
4444        struct xlog_rec_header  *rhead,
4445        struct xlog_op_header   *ohead,
4446        char                    *dp,
4447        char                    *end,
4448        int                     pass,
4449        struct list_head        *buffer_list)
4450{
4451        struct xlog_recover     *trans;
4452        unsigned int            len;
4453        int                     error;
4454
4455        /* Do we understand who wrote this op? */
4456        if (ohead->oh_clientid != XFS_TRANSACTION &&
4457            ohead->oh_clientid != XFS_LOG) {
4458                xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4459                        __func__, ohead->oh_clientid);
4460                ASSERT(0);
4461                return -EIO;
4462        }
4463
4464        /*
4465         * Check the ophdr contains all the data it is supposed to contain.
4466         */
4467        len = be32_to_cpu(ohead->oh_len);
4468        if (dp + len > end) {
4469                xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4470                WARN_ON(1);
4471                return -EIO;
4472        }
4473
4474        trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4475        if (!trans) {
4476                /* nothing to do, so skip over this ophdr */
4477                return 0;
4478        }
4479
4480        /*
4481         * The recovered buffer queue is drained only once we know that all
4482         * recovery items for the current LSN have been processed. This is
4483         * required because:
4484         *
4485         * - Buffer write submission updates the metadata LSN of the buffer.
4486         * - Log recovery skips items with a metadata LSN >= the current LSN of
4487         *   the recovery item.
4488         * - Separate recovery items against the same metadata buffer can share
4489         *   a current LSN. I.e., consider that the LSN of a recovery item is
4490         *   defined as the starting LSN of the first record in which its
4491         *   transaction appears, that a record can hold multiple transactions,
4492         *   and/or that a transaction can span multiple records.
4493         *
4494         * In other words, we are allowed to submit a buffer from log recovery
4495         * once per current LSN. Otherwise, we may incorrectly skip recovery
4496         * items and cause corruption.
4497         *
4498         * We don't know up front whether buffers are updated multiple times per
4499         * LSN. Therefore, track the current LSN of each commit log record as it
4500         * is processed and drain the queue when it changes. Use commit records
4501         * because they are ordered correctly by the logging code.
4502         */
4503        if (log->l_recovery_lsn != trans->r_lsn &&
4504            ohead->oh_flags & XLOG_COMMIT_TRANS) {
4505                error = xfs_buf_delwri_submit(buffer_list);
4506                if (error)
4507                        return error;
4508                log->l_recovery_lsn = trans->r_lsn;
4509        }
4510
4511        return xlog_recovery_process_trans(log, trans, dp, len,
4512                                           ohead->oh_flags, pass, buffer_list);
4513}
4514
4515/*
4516 * There are two valid states of the r_state field.  0 indicates that the
4517 * transaction structure is in a normal state.  We have either seen the
4518 * start of the transaction or the last operation we added was not a partial
4519 * operation.  If the last operation we added to the transaction was a
4520 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4521 *
4522 * NOTE: skip LRs with 0 data length.
4523 */
4524STATIC int
4525xlog_recover_process_data(
4526        struct xlog             *log,
4527        struct hlist_head       rhash[],
4528        struct xlog_rec_header  *rhead,
4529        char                    *dp,
4530        int                     pass,
4531        struct list_head        *buffer_list)
4532{
4533        struct xlog_op_header   *ohead;
4534        char                    *end;
4535        int                     num_logops;
4536        int                     error;
4537
4538        end = dp + be32_to_cpu(rhead->h_len);
4539        num_logops = be32_to_cpu(rhead->h_num_logops);
4540
4541        /* check the log format matches our own - else we can't recover */
4542        if (xlog_header_check_recover(log->l_mp, rhead))
4543                return -EIO;
4544
4545        trace_xfs_log_recover_record(log, rhead, pass);
4546        while ((dp < end) && num_logops) {
4547
4548                ohead = (struct xlog_op_header *)dp;
4549                dp += sizeof(*ohead);
4550                ASSERT(dp <= end);
4551
4552                /* errors will abort recovery */
4553                error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4554                                                   dp, end, pass, buffer_list);
4555                if (error)
4556                        return error;
4557
4558                dp += be32_to_cpu(ohead->oh_len);
4559                num_logops--;
4560        }
4561        return 0;
4562}
4563
4564/* Recover the EFI if necessary. */
4565STATIC int
4566xlog_recover_process_efi(
4567        struct xfs_mount                *mp,
4568        struct xfs_ail                  *ailp,
4569        struct xfs_log_item             *lip)
4570{
4571        struct xfs_efi_log_item         *efip;
4572        int                             error;
4573
4574        /*
4575         * Skip EFIs that we've already processed.
4576         */
4577        efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4578        if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4579                return 0;
4580
4581        spin_unlock(&ailp->ail_lock);
4582        error = xfs_efi_recover(mp, efip);
4583        spin_lock(&ailp->ail_lock);
4584
4585        return error;
4586}
4587
4588/* Release the EFI since we're cancelling everything. */
4589STATIC void
4590xlog_recover_cancel_efi(
4591        struct xfs_mount                *mp,
4592        struct xfs_ail                  *ailp,
4593        struct xfs_log_item             *lip)
4594{
4595        struct xfs_efi_log_item         *efip;
4596
4597        efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4598
4599        spin_unlock(&ailp->ail_lock);
4600        xfs_efi_release(efip);
4601        spin_lock(&ailp->ail_lock);
4602}
4603
4604/* Recover the RUI if necessary. */
4605STATIC int
4606xlog_recover_process_rui(
4607        struct xfs_mount                *mp,
4608        struct xfs_ail                  *ailp,
4609        struct xfs_log_item             *lip)
4610{
4611        struct xfs_rui_log_item         *ruip;
4612        int                             error;
4613
4614        /*
4615         * Skip RUIs that we've already processed.
4616         */
4617        ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4618        if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4619                return 0;
4620
4621        spin_unlock(&ailp->ail_lock);
4622        error = xfs_rui_recover(mp, ruip);
4623        spin_lock(&ailp->ail_lock);
4624
4625        return error;
4626}
4627
4628/* Release the RUI since we're cancelling everything. */
4629STATIC void
4630xlog_recover_cancel_rui(
4631        struct xfs_mount                *mp,
4632        struct xfs_ail                  *ailp,
4633        struct xfs_log_item             *lip)
4634{
4635        struct xfs_rui_log_item         *ruip;
4636
4637        ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4638
4639        spin_unlock(&ailp->ail_lock);
4640        xfs_rui_release(ruip);
4641        spin_lock(&ailp->ail_lock);
4642}
4643
4644/* Recover the CUI if necessary. */
4645STATIC int
4646xlog_recover_process_cui(
4647        struct xfs_trans                *parent_tp,
4648        struct xfs_ail                  *ailp,
4649        struct xfs_log_item             *lip)
4650{
4651        struct xfs_cui_log_item         *cuip;
4652        int                             error;
4653
4654        /*
4655         * Skip CUIs that we've already processed.
4656         */
4657        cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4658        if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4659                return 0;
4660
4661        spin_unlock(&ailp->ail_lock);
4662        error = xfs_cui_recover(parent_tp, cuip);
4663        spin_lock(&ailp->ail_lock);
4664
4665        return error;
4666}
4667
4668/* Release the CUI since we're cancelling everything. */
4669STATIC void
4670xlog_recover_cancel_cui(
4671        struct xfs_mount                *mp,
4672        struct xfs_ail                  *ailp,
4673        struct xfs_log_item             *lip)
4674{
4675        struct xfs_cui_log_item         *cuip;
4676
4677        cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4678
4679        spin_unlock(&ailp->ail_lock);
4680        xfs_cui_release(cuip);
4681        spin_lock(&ailp->ail_lock);
4682}
4683
4684/* Recover the BUI if necessary. */
4685STATIC int
4686xlog_recover_process_bui(
4687        struct xfs_trans                *parent_tp,
4688        struct xfs_ail                  *ailp,
4689        struct xfs_log_item             *lip)
4690{
4691        struct xfs_bui_log_item         *buip;
4692        int                             error;
4693
4694        /*
4695         * Skip BUIs that we've already processed.
4696         */
4697        buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4698        if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4699                return 0;
4700
4701        spin_unlock(&ailp->ail_lock);
4702        error = xfs_bui_recover(parent_tp, buip);
4703        spin_lock(&ailp->ail_lock);
4704
4705        return error;
4706}
4707
4708/* Release the BUI since we're cancelling everything. */
4709STATIC void
4710xlog_recover_cancel_bui(
4711        struct xfs_mount                *mp,
4712        struct xfs_ail                  *ailp,
4713        struct xfs_log_item             *lip)
4714{
4715        struct xfs_bui_log_item         *buip;
4716
4717        buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4718
4719        spin_unlock(&ailp->ail_lock);
4720        xfs_bui_release(buip);
4721        spin_lock(&ailp->ail_lock);
4722}
4723
4724/* Is this log item a deferred action intent? */
4725static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4726{
4727        switch (lip->li_type) {
4728        case XFS_LI_EFI:
4729        case XFS_LI_RUI:
4730        case XFS_LI_CUI:
4731        case XFS_LI_BUI:
4732                return true;
4733        default:
4734                return false;
4735        }
4736}
4737
4738/* Take all the collected deferred ops and finish them in order. */
4739static int
4740xlog_finish_defer_ops(
4741        struct xfs_trans        *parent_tp)
4742{
4743        struct xfs_mount        *mp = parent_tp->t_mountp;
4744        struct xfs_trans        *tp;
4745        int64_t                 freeblks;
4746        uint                    resblks;
4747        int                     error;
4748
4749        /*
4750         * We're finishing the defer_ops that accumulated as a result of
4751         * recovering unfinished intent items during log recovery.  We
4752         * reserve an itruncate transaction because it is the largest
4753         * permanent transaction type.  Since we're the only user of the fs
4754         * right now, take 93% (15/16) of the available free blocks.  Use
4755         * weird math to avoid a 64-bit division.
4756         */
4757        freeblks = percpu_counter_sum(&mp->m_fdblocks);
4758        if (freeblks <= 0)
4759                return -ENOSPC;
4760        resblks = min_t(int64_t, UINT_MAX, freeblks);
4761        resblks = (resblks * 15) >> 4;
4762        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4763                        0, XFS_TRANS_RESERVE, &tp);
4764        if (error)
4765                return error;
4766        /* transfer all collected dfops to this transaction */
4767        xfs_defer_move(tp, parent_tp);
4768
4769        return xfs_trans_commit(tp);
4770}
4771
4772/*
4773 * When this is called, all of the log intent items which did not have
4774 * corresponding log done items should be in the AIL.  What we do now
4775 * is update the data structures associated with each one.
4776 *
4777 * Since we process the log intent items in normal transactions, they
4778 * will be removed at some point after the commit.  This prevents us
4779 * from just walking down the list processing each one.  We'll use a
4780 * flag in the intent item to skip those that we've already processed
4781 * and use the AIL iteration mechanism's generation count to try to
4782 * speed this up at least a bit.
4783 *
4784 * When we start, we know that the intents are the only things in the
4785 * AIL.  As we process them, however, other items are added to the
4786 * AIL.
4787 */
4788STATIC int
4789xlog_recover_process_intents(
4790        struct xlog             *log)
4791{
4792        struct xfs_trans        *parent_tp;
4793        struct xfs_ail_cursor   cur;
4794        struct xfs_log_item     *lip;
4795        struct xfs_ail          *ailp;
4796        int                     error;
4797#if defined(DEBUG) || defined(XFS_WARN)
4798        xfs_lsn_t               last_lsn;
4799#endif
4800
4801        /*
4802         * The intent recovery handlers commit transactions to complete recovery
4803         * for individual intents, but any new deferred operations that are
4804         * queued during that process are held off until the very end. The
4805         * purpose of this transaction is to serve as a container for deferred
4806         * operations. Each intent recovery handler must transfer dfops here
4807         * before its local transaction commits, and we'll finish the entire
4808         * list below.
4809         */
4810        error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
4811        if (error)
4812                return error;
4813
4814        ailp = log->l_ailp;
4815        spin_lock(&ailp->ail_lock);
4816        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4817#if defined(DEBUG) || defined(XFS_WARN)
4818        last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4819#endif
4820        while (lip != NULL) {
4821                /*
4822                 * We're done when we see something other than an intent.
4823                 * There should be no intents left in the AIL now.
4824                 */
4825                if (!xlog_item_is_intent(lip)) {
4826#ifdef DEBUG
4827                        for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4828                                ASSERT(!xlog_item_is_intent(lip));
4829#endif
4830                        break;
4831                }
4832
4833                /*
4834                 * We should never see a redo item with a LSN higher than
4835                 * the last transaction we found in the log at the start
4836                 * of recovery.
4837                 */
4838                ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4839
4840                /*
4841                 * NOTE: If your intent processing routine can create more
4842                 * deferred ops, you /must/ attach them to the dfops in this
4843                 * routine or else those subsequent intents will get
4844                 * replayed in the wrong order!
4845                 */
4846                switch (lip->li_type) {
4847                case XFS_LI_EFI:
4848                        error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4849                        break;
4850                case XFS_LI_RUI:
4851                        error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4852                        break;
4853                case XFS_LI_CUI:
4854                        error = xlog_recover_process_cui(parent_tp, ailp, lip);
4855                        break;
4856                case XFS_LI_BUI:
4857                        error = xlog_recover_process_bui(parent_tp, ailp, lip);
4858                        break;
4859                }
4860                if (error)
4861                        goto out;
4862                lip = xfs_trans_ail_cursor_next(ailp, &cur);
4863        }
4864out:
4865        xfs_trans_ail_cursor_done(&cur);
4866        spin_unlock(&ailp->ail_lock);
4867        if (!error)
4868                error = xlog_finish_defer_ops(parent_tp);
4869        xfs_trans_cancel(parent_tp);
4870
4871        return error;
4872}
4873
4874/*
4875 * A cancel occurs when the mount has failed and we're bailing out.
4876 * Release all pending log intent items so they don't pin the AIL.
4877 */
4878STATIC void
4879xlog_recover_cancel_intents(
4880        struct xlog             *log)
4881{
4882        struct xfs_log_item     *lip;
4883        struct xfs_ail_cursor   cur;
4884        struct xfs_ail          *ailp;
4885
4886        ailp = log->l_ailp;
4887        spin_lock(&ailp->ail_lock);
4888        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4889        while (lip != NULL) {
4890                /*
4891                 * We're done when we see something other than an intent.
4892                 * There should be no intents left in the AIL now.
4893                 */
4894                if (!xlog_item_is_intent(lip)) {
4895#ifdef DEBUG
4896                        for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4897                                ASSERT(!xlog_item_is_intent(lip));
4898#endif
4899                        break;
4900                }
4901
4902                switch (lip->li_type) {
4903                case XFS_LI_EFI:
4904                        xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4905                        break;
4906                case XFS_LI_RUI:
4907                        xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4908                        break;
4909                case XFS_LI_CUI:
4910                        xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4911                        break;
4912                case XFS_LI_BUI:
4913                        xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4914                        break;
4915                }
4916
4917                lip = xfs_trans_ail_cursor_next(ailp, &cur);
4918        }
4919
4920        xfs_trans_ail_cursor_done(&cur);
4921        spin_unlock(&ailp->ail_lock);
4922}
4923
4924/*
4925 * This routine performs a transaction to null out a bad inode pointer
4926 * in an agi unlinked inode hash bucket.
4927 */
4928STATIC void
4929xlog_recover_clear_agi_bucket(
4930        xfs_mount_t     *mp,
4931        xfs_agnumber_t  agno,
4932        int             bucket)
4933{
4934        xfs_trans_t     *tp;
4935        xfs_agi_t       *agi;
4936        xfs_buf_t       *agibp;
4937        int             offset;
4938        int             error;
4939
4940        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4941        if (error)
4942                goto out_error;
4943
4944        error = xfs_read_agi(mp, tp, agno, &agibp);
4945        if (error)
4946                goto out_abort;
4947
4948        agi = XFS_BUF_TO_AGI(agibp);
4949        agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4950        offset = offsetof(xfs_agi_t, agi_unlinked) +
4951                 (sizeof(xfs_agino_t) * bucket);
4952        xfs_trans_log_buf(tp, agibp, offset,
4953                          (offset + sizeof(xfs_agino_t) - 1));
4954
4955        error = xfs_trans_commit(tp);
4956        if (error)
4957                goto out_error;
4958        return;
4959
4960out_abort:
4961        xfs_trans_cancel(tp);
4962out_error:
4963        xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4964        return;
4965}
4966
4967STATIC xfs_agino_t
4968xlog_recover_process_one_iunlink(
4969        struct xfs_mount                *mp,
4970        xfs_agnumber_t                  agno,
4971        xfs_agino_t                     agino,
4972        int                             bucket)
4973{
4974        struct xfs_buf                  *ibp;
4975        struct xfs_dinode               *dip;
4976        struct xfs_inode                *ip;
4977        xfs_ino_t                       ino;
4978        int                             error;
4979
4980        ino = XFS_AGINO_TO_INO(mp, agno, agino);
4981        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4982        if (error)
4983                goto fail;
4984
4985        /*
4986         * Get the on disk inode to find the next inode in the bucket.
4987         */
4988        error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4989        if (error)
4990                goto fail_iput;
4991
4992        xfs_iflags_clear(ip, XFS_IRECOVERY);
4993        ASSERT(VFS_I(ip)->i_nlink == 0);
4994        ASSERT(VFS_I(ip)->i_mode != 0);
4995
4996        /* setup for the next pass */
4997        agino = be32_to_cpu(dip->di_next_unlinked);
4998        xfs_buf_relse(ibp);
4999
5000        /*
5001         * Prevent any DMAPI event from being sent when the reference on
5002         * the inode is dropped.
5003         */
5004        ip->i_d.di_dmevmask = 0;
5005
5006        xfs_irele(ip);
5007        return agino;
5008
5009 fail_iput:
5010        xfs_irele(ip);
5011 fail:
5012        /*
5013         * We can't read in the inode this bucket points to, or this inode
5014         * is messed up.  Just ditch this bucket of inodes.  We will lose
5015         * some inodes and space, but at least we won't hang.
5016         *
5017         * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5018         * clear the inode pointer in the bucket.
5019         */
5020        xlog_recover_clear_agi_bucket(mp, agno, bucket);
5021        return NULLAGINO;
5022}
5023
5024/*
5025 * xlog_iunlink_recover
5026 *
5027 * This is called during recovery to process any inodes which
5028 * we unlinked but not freed when the system crashed.  These
5029 * inodes will be on the lists in the AGI blocks.  What we do
5030 * here is scan all the AGIs and fully truncate and free any
5031 * inodes found on the lists.  Each inode is removed from the
5032 * lists when it has been fully truncated and is freed.  The
5033 * freeing of the inode and its removal from the list must be
5034 * atomic.
5035 */
5036STATIC void
5037xlog_recover_process_iunlinks(
5038        struct xlog     *log)
5039{
5040        xfs_mount_t     *mp;
5041        xfs_agnumber_t  agno;
5042        xfs_agi_t       *agi;
5043        xfs_buf_t       *agibp;
5044        xfs_agino_t     agino;
5045        int             bucket;
5046        int             error;
5047
5048        mp = log->l_mp;
5049
5050        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5051                /*
5052                 * Find the agi for this ag.
5053                 */
5054                error = xfs_read_agi(mp, NULL, agno, &agibp);
5055                if (error) {
5056                        /*
5057                         * AGI is b0rked. Don't process it.
5058                         *
5059                         * We should probably mark the filesystem as corrupt
5060                         * after we've recovered all the ag's we can....
5061                         */
5062                        continue;
5063                }
5064                /*
5065                 * Unlock the buffer so that it can be acquired in the normal
5066                 * course of the transaction to truncate and free each inode.
5067                 * Because we are not racing with anyone else here for the AGI
5068                 * buffer, we don't even need to hold it locked to read the
5069                 * initial unlinked bucket entries out of the buffer. We keep
5070                 * buffer reference though, so that it stays pinned in memory
5071                 * while we need the buffer.
5072                 */
5073                agi = XFS_BUF_TO_AGI(agibp);
5074                xfs_buf_unlock(agibp);
5075
5076                for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5077                        agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5078                        while (agino != NULLAGINO) {
5079                                agino = xlog_recover_process_one_iunlink(mp,
5080                                                        agno, agino, bucket);
5081                        }
5082                }
5083                xfs_buf_rele(agibp);
5084        }
5085}
5086
5087STATIC void
5088xlog_unpack_data(
5089        struct xlog_rec_header  *rhead,
5090        char                    *dp,
5091        struct xlog             *log)
5092{
5093        int                     i, j, k;
5094
5095        for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5096                  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5097                *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5098                dp += BBSIZE;
5099        }
5100
5101        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5102                xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5103                for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5104                        j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5105                        k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5106                        *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5107                        dp += BBSIZE;
5108                }
5109        }
5110}
5111
5112/*
5113 * CRC check, unpack and process a log record.
5114 */
5115STATIC int
5116xlog_recover_process(
5117        struct xlog             *log,
5118        struct hlist_head       rhash[],
5119        struct xlog_rec_header  *rhead,
5120        char                    *dp,
5121        int                     pass,
5122        struct list_head        *buffer_list)
5123{
5124        __le32                  old_crc = rhead->h_crc;
5125        __le32                  crc;
5126
5127        crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5128
5129        /*
5130         * Nothing else to do if this is a CRC verification pass. Just return
5131         * if this a record with a non-zero crc. Unfortunately, mkfs always
5132         * sets old_crc to 0 so we must consider this valid even on v5 supers.
5133         * Otherwise, return EFSBADCRC on failure so the callers up the stack
5134         * know precisely what failed.
5135         */
5136        if (pass == XLOG_RECOVER_CRCPASS) {
5137                if (old_crc && crc != old_crc)
5138                        return -EFSBADCRC;
5139                return 0;
5140        }
5141
5142        /*
5143         * We're in the normal recovery path. Issue a warning if and only if the
5144         * CRC in the header is non-zero. This is an advisory warning and the
5145         * zero CRC check prevents warnings from being emitted when upgrading
5146         * the kernel from one that does not add CRCs by default.
5147         */
5148        if (crc != old_crc) {
5149                if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5150                        xfs_alert(log->l_mp,
5151                "log record CRC mismatch: found 0x%x, expected 0x%x.",
5152                                        le32_to_cpu(old_crc),
5153                                        le32_to_cpu(crc));
5154                        xfs_hex_dump(dp, 32);
5155                }
5156
5157                /*
5158                 * If the filesystem is CRC enabled, this mismatch becomes a
5159                 * fatal log corruption failure.
5160                 */
5161                if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5162                        return -EFSCORRUPTED;
5163        }
5164
5165        xlog_unpack_data(rhead, dp, log);
5166
5167        return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5168                                         buffer_list);
5169}
5170
5171STATIC int
5172xlog_valid_rec_header(
5173        struct xlog             *log,
5174        struct xlog_rec_header  *rhead,
5175        xfs_daddr_t             blkno)
5176{
5177        int                     hlen;
5178
5179        if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5180                XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5181                                XFS_ERRLEVEL_LOW, log->l_mp);
5182                return -EFSCORRUPTED;
5183        }
5184        if (unlikely(
5185            (!rhead->h_version ||
5186            (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5187                xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5188                        __func__, be32_to_cpu(rhead->h_version));
5189                return -EIO;
5190        }
5191
5192        /* LR body must have data or it wouldn't have been written */
5193        hlen = be32_to_cpu(rhead->h_len);
5194        if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
5195                XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5196                                XFS_ERRLEVEL_LOW, log->l_mp);
5197                return -EFSCORRUPTED;
5198        }
5199        if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
5200                XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5201                                XFS_ERRLEVEL_LOW, log->l_mp);
5202                return -EFSCORRUPTED;
5203        }
5204        return 0;
5205}
5206
5207/*
5208 * Read the log from tail to head and process the log records found.
5209 * Handle the two cases where the tail and head are in the same cycle
5210 * and where the active portion of the log wraps around the end of
5211 * the physical log separately.  The pass parameter is passed through
5212 * to the routines called to process the data and is not looked at
5213 * here.
5214 */
5215STATIC int
5216xlog_do_recovery_pass(
5217        struct xlog             *log,
5218        xfs_daddr_t             head_blk,
5219        xfs_daddr_t             tail_blk,
5220        int                     pass,
5221        xfs_daddr_t             *first_bad)     /* out: first bad log rec */
5222{
5223        xlog_rec_header_t       *rhead;
5224        xfs_daddr_t             blk_no, rblk_no;
5225        xfs_daddr_t             rhead_blk;
5226        char                    *offset;
5227        char                    *hbp, *dbp;
5228        int                     error = 0, h_size, h_len;
5229        int                     error2 = 0;
5230        int                     bblks, split_bblks;
5231        int                     hblks, split_hblks, wrapped_hblks;
5232        int                     i;
5233        struct hlist_head       rhash[XLOG_RHASH_SIZE];
5234        LIST_HEAD               (buffer_list);
5235
5236        ASSERT(head_blk != tail_blk);
5237        blk_no = rhead_blk = tail_blk;
5238
5239        for (i = 0; i < XLOG_RHASH_SIZE; i++)
5240                INIT_HLIST_HEAD(&rhash[i]);
5241
5242        /*
5243         * Read the header of the tail block and get the iclog buffer size from
5244         * h_size.  Use this to tell how many sectors make up the log header.
5245         */
5246        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5247                /*
5248                 * When using variable length iclogs, read first sector of
5249                 * iclog header and extract the header size from it.  Get a
5250                 * new hbp that is the correct size.
5251                 */
5252                hbp = xlog_alloc_buffer(log, 1);
5253                if (!hbp)
5254                        return -ENOMEM;
5255
5256                error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5257                if (error)
5258                        goto bread_err1;
5259
5260                rhead = (xlog_rec_header_t *)offset;
5261                error = xlog_valid_rec_header(log, rhead, tail_blk);
5262                if (error)
5263                        goto bread_err1;
5264
5265                /*
5266                 * xfsprogs has a bug where record length is based on lsunit but
5267                 * h_size (iclog size) is hardcoded to 32k. Now that we
5268                 * unconditionally CRC verify the unmount record, this means the
5269                 * log buffer can be too small for the record and cause an
5270                 * overrun.
5271                 *
5272                 * Detect this condition here. Use lsunit for the buffer size as
5273                 * long as this looks like the mkfs case. Otherwise, return an
5274                 * error to avoid a buffer overrun.
5275                 */
5276                h_size = be32_to_cpu(rhead->h_size);
5277                h_len = be32_to_cpu(rhead->h_len);
5278                if (h_len > h_size) {
5279                        if (h_len <= log->l_mp->m_logbsize &&
5280                            be32_to_cpu(rhead->h_num_logops) == 1) {
5281                                xfs_warn(log->l_mp,
5282                "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5283                                         h_size, log->l_mp->m_logbsize);
5284                                h_size = log->l_mp->m_logbsize;
5285                        } else
5286                                return -EFSCORRUPTED;
5287                }
5288
5289                if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5290                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5291                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5292                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
5293                                hblks++;
5294                        kmem_free(hbp);
5295                        hbp = xlog_alloc_buffer(log, hblks);
5296                } else {
5297                        hblks = 1;
5298                }
5299        } else {
5300                ASSERT(log->l_sectBBsize == 1);
5301                hblks = 1;
5302                hbp = xlog_alloc_buffer(log, 1);
5303                h_size = XLOG_BIG_RECORD_BSIZE;
5304        }
5305
5306        if (!hbp)
5307                return -ENOMEM;
5308        dbp = xlog_alloc_buffer(log, BTOBB(h_size));
5309        if (!dbp) {
5310                kmem_free(hbp);
5311                return -ENOMEM;
5312        }
5313
5314        memset(rhash, 0, sizeof(rhash));
5315        if (tail_blk > head_blk) {
5316                /*
5317                 * Perform recovery around the end of the physical log.
5318                 * When the head is not on the same cycle number as the tail,
5319                 * we can't do a sequential recovery.
5320                 */
5321                while (blk_no < log->l_logBBsize) {
5322                        /*
5323                         * Check for header wrapping around physical end-of-log
5324                         */
5325                        offset = hbp;
5326                        split_hblks = 0;
5327                        wrapped_hblks = 0;
5328                        if (blk_no + hblks <= log->l_logBBsize) {
5329                                /* Read header in one read */
5330                                error = xlog_bread(log, blk_no, hblks, hbp,
5331                                                   &offset);
5332                                if (error)
5333                                        goto bread_err2;
5334                        } else {
5335                                /* This LR is split across physical log end */
5336                                if (blk_no != log->l_logBBsize) {
5337                                        /* some data before physical log end */
5338                                        ASSERT(blk_no <= INT_MAX);
5339                                        split_hblks = log->l_logBBsize - (int)blk_no;
5340                                        ASSERT(split_hblks > 0);
5341                                        error = xlog_bread(log, blk_no,
5342                                                           split_hblks, hbp,
5343                                                           &offset);
5344                                        if (error)
5345                                                goto bread_err2;
5346                                }
5347
5348                                /*
5349                                 * Note: this black magic still works with
5350                                 * large sector sizes (non-512) only because:
5351                                 * - we increased the buffer size originally
5352                                 *   by 1 sector giving us enough extra space
5353                                 *   for the second read;
5354                                 * - the log start is guaranteed to be sector
5355                                 *   aligned;
5356                                 * - we read the log end (LR header start)
5357                                 *   _first_, then the log start (LR header end)
5358                                 *   - order is important.
5359                                 */
5360                                wrapped_hblks = hblks - split_hblks;
5361                                error = xlog_bread_noalign(log, 0,
5362                                                wrapped_hblks,
5363                                                offset + BBTOB(split_hblks));
5364                                if (error)
5365                                        goto bread_err2;
5366                        }
5367                        rhead = (xlog_rec_header_t *)offset;
5368                        error = xlog_valid_rec_header(log, rhead,
5369                                                split_hblks ? blk_no : 0);
5370                        if (error)
5371                                goto bread_err2;
5372
5373                        bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5374                        blk_no += hblks;
5375
5376                        /*
5377                         * Read the log record data in multiple reads if it
5378                         * wraps around the end of the log. Note that if the
5379                         * header already wrapped, blk_no could point past the
5380                         * end of the log. The record data is contiguous in
5381                         * that case.
5382                         */
5383                        if (blk_no + bblks <= log->l_logBBsize ||
5384                            blk_no >= log->l_logBBsize) {
5385                                rblk_no = xlog_wrap_logbno(log, blk_no);
5386                                error = xlog_bread(log, rblk_no, bblks, dbp,
5387                                                   &offset);
5388                                if (error)
5389                                        goto bread_err2;
5390                        } else {
5391                                /* This log record is split across the
5392                                 * physical end of log */
5393                                offset = dbp;
5394                                split_bblks = 0;
5395                                if (blk_no != log->l_logBBsize) {
5396                                        /* some data is before the physical
5397                                         * end of log */
5398                                        ASSERT(!wrapped_hblks);
5399                                        ASSERT(blk_no <= INT_MAX);
5400                                        split_bblks =
5401                                                log->l_logBBsize - (int)blk_no;
5402                                        ASSERT(split_bblks > 0);
5403                                        error = xlog_bread(log, blk_no,
5404                                                        split_bblks, dbp,
5405                                                        &offset);
5406                                        if (error)
5407                                                goto bread_err2;
5408                                }
5409
5410                                /*
5411                                 * Note: this black magic still works with
5412                                 * large sector sizes (non-512) only because:
5413                                 * - we increased the buffer size originally
5414                                 *   by 1 sector giving us enough extra space
5415                                 *   for the second read;
5416                                 * - the log start is guaranteed to be sector
5417                                 *   aligned;
5418                                 * - we read the log end (LR header start)
5419                                 *   _first_, then the log start (LR header end)
5420                                 *   - order is important.
5421                                 */
5422                                error = xlog_bread_noalign(log, 0,
5423                                                bblks - split_bblks,
5424                                                offset + BBTOB(split_bblks));
5425                                if (error)
5426                                        goto bread_err2;
5427                        }
5428
5429                        error = xlog_recover_process(log, rhash, rhead, offset,
5430                                                     pass, &buffer_list);
5431                        if (error)
5432                                goto bread_err2;
5433
5434                        blk_no += bblks;
5435                        rhead_blk = blk_no;
5436                }
5437
5438                ASSERT(blk_no >= log->l_logBBsize);
5439                blk_no -= log->l_logBBsize;
5440                rhead_blk = blk_no;
5441        }
5442
5443        /* read first part of physical log */
5444        while (blk_no < head_blk) {
5445                error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5446                if (error)
5447                        goto bread_err2;
5448
5449                rhead = (xlog_rec_header_t *)offset;
5450                error = xlog_valid_rec_header(log, rhead, blk_no);
5451                if (error)
5452                        goto bread_err2;
5453
5454                /* blocks in data section */
5455                bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5456                error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5457                                   &offset);
5458                if (error)
5459                        goto bread_err2;
5460
5461                error = xlog_recover_process(log, rhash, rhead, offset, pass,
5462                                             &buffer_list);
5463                if (error)
5464                        goto bread_err2;
5465
5466                blk_no += bblks + hblks;
5467                rhead_blk = blk_no;
5468        }
5469
5470 bread_err2:
5471        kmem_free(dbp);
5472 bread_err1:
5473        kmem_free(hbp);
5474
5475        /*
5476         * Submit buffers that have been added from the last record processed,
5477         * regardless of error status.
5478         */
5479        if (!list_empty(&buffer_list))
5480                error2 = xfs_buf_delwri_submit(&buffer_list);
5481
5482        if (error && first_bad)
5483                *first_bad = rhead_blk;
5484
5485        /*
5486         * Transactions are freed at commit time but transactions without commit
5487         * records on disk are never committed. Free any that may be left in the
5488         * hash table.
5489         */
5490        for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5491                struct hlist_node       *tmp;
5492                struct xlog_recover     *trans;
5493
5494                hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5495                        xlog_recover_free_trans(trans);
5496        }
5497
5498        return error ? error : error2;
5499}
5500
5501/*
5502 * Do the recovery of the log.  We actually do this in two phases.
5503 * The two passes are necessary in order to implement the function
5504 * of cancelling a record written into the log.  The first pass
5505 * determines those things which have been cancelled, and the
5506 * second pass replays log items normally except for those which
5507 * have been cancelled.  The handling of the replay and cancellations
5508 * takes place in the log item type specific routines.
5509 *
5510 * The table of items which have cancel records in the log is allocated
5511 * and freed at this level, since only here do we know when all of
5512 * the log recovery has been completed.
5513 */
5514STATIC int
5515xlog_do_log_recovery(
5516        struct xlog     *log,
5517        xfs_daddr_t     head_blk,
5518        xfs_daddr_t     tail_blk)
5519{
5520        int             error, i;
5521
5522        ASSERT(head_blk != tail_blk);
5523
5524        /*
5525         * First do a pass to find all of the cancelled buf log items.
5526         * Store them in the buf_cancel_table for use in the second pass.
5527         */
5528        log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5529                                                 sizeof(struct list_head),
5530                                                 KM_SLEEP);
5531        for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5532                INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5533
5534        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5535                                      XLOG_RECOVER_PASS1, NULL);
5536        if (error != 0) {
5537                kmem_free(log->l_buf_cancel_table);
5538                log->l_buf_cancel_table = NULL;
5539                return error;
5540        }
5541        /*
5542         * Then do a second pass to actually recover the items in the log.
5543         * When it is complete free the table of buf cancel items.
5544         */
5545        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5546                                      XLOG_RECOVER_PASS2, NULL);
5547#ifdef DEBUG
5548        if (!error) {
5549                int     i;
5550
5551                for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5552                        ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5553        }
5554#endif  /* DEBUG */
5555
5556        kmem_free(log->l_buf_cancel_table);
5557        log->l_buf_cancel_table = NULL;
5558
5559        return error;
5560}
5561
5562/*
5563 * Do the actual recovery
5564 */
5565STATIC int
5566xlog_do_recover(
5567        struct xlog     *log,
5568        xfs_daddr_t     head_blk,
5569        xfs_daddr_t     tail_blk)
5570{
5571        struct xfs_mount *mp = log->l_mp;
5572        int             error;
5573        xfs_buf_t       *bp;
5574        xfs_sb_t        *sbp;
5575
5576        trace_xfs_log_recover(log, head_blk, tail_blk);
5577
5578        /*
5579         * First replay the images in the log.
5580         */
5581        error = xlog_do_log_recovery(log, head_blk, tail_blk);
5582        if (error)
5583                return error;
5584
5585        /*
5586         * If IO errors happened during recovery, bail out.
5587         */
5588        if (XFS_FORCED_SHUTDOWN(mp)) {
5589                return -EIO;
5590        }
5591
5592        /*
5593         * We now update the tail_lsn since much of the recovery has completed
5594         * and there may be space available to use.  If there were no extent
5595         * or iunlinks, we can free up the entire log and set the tail_lsn to
5596         * be the last_sync_lsn.  This was set in xlog_find_tail to be the
5597         * lsn of the last known good LR on disk.  If there are extent frees
5598         * or iunlinks they will have some entries in the AIL; so we look at
5599         * the AIL to determine how to set the tail_lsn.
5600         */
5601        xlog_assign_tail_lsn(mp);
5602
5603        /*
5604         * Now that we've finished replaying all buffer and inode
5605         * updates, re-read in the superblock and reverify it.
5606         */
5607        bp = xfs_getsb(mp);
5608        bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5609        ASSERT(!(bp->b_flags & XBF_WRITE));
5610        bp->b_flags |= XBF_READ;
5611        bp->b_ops = &xfs_sb_buf_ops;
5612
5613        error = xfs_buf_submit(bp);
5614        if (error) {
5615                if (!XFS_FORCED_SHUTDOWN(mp)) {
5616                        xfs_buf_ioerror_alert(bp, __func__);
5617                        ASSERT(0);
5618                }
5619                xfs_buf_relse(bp);
5620                return error;
5621        }
5622
5623        /* Convert superblock from on-disk format */
5624        sbp = &mp->m_sb;
5625        xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5626        xfs_buf_relse(bp);
5627
5628        /* re-initialise in-core superblock and geometry structures */
5629        xfs_reinit_percpu_counters(mp);
5630        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5631        if (error) {
5632                xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5633                return error;
5634        }
5635        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5636
5637        xlog_recover_check_summary(log);
5638
5639        /* Normal transactions can now occur */
5640        log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5641        return 0;
5642}
5643
5644/*
5645 * Perform recovery and re-initialize some log variables in xlog_find_tail.
5646 *
5647 * Return error or zero.
5648 */
5649int
5650xlog_recover(
5651        struct xlog     *log)
5652{
5653        xfs_daddr_t     head_blk, tail_blk;
5654        int             error;
5655
5656        /* find the tail of the log */
5657        error = xlog_find_tail(log, &head_blk, &tail_blk);
5658        if (error)
5659                return error;
5660
5661        /*
5662         * The superblock was read before the log was available and thus the LSN
5663         * could not be verified. Check the superblock LSN against the current
5664         * LSN now that it's known.
5665         */
5666        if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5667            !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5668                return -EINVAL;
5669
5670        if (tail_blk != head_blk) {
5671                /* There used to be a comment here:
5672                 *
5673                 * disallow recovery on read-only mounts.  note -- mount
5674                 * checks for ENOSPC and turns it into an intelligent
5675                 * error message.
5676                 * ...but this is no longer true.  Now, unless you specify
5677                 * NORECOVERY (in which case this function would never be
5678                 * called), we just go ahead and recover.  We do this all
5679                 * under the vfs layer, so we can get away with it unless
5680                 * the device itself is read-only, in which case we fail.
5681                 */
5682                if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
5683                        return error;
5684                }
5685
5686                /*
5687                 * Version 5 superblock log feature mask validation. We know the
5688                 * log is dirty so check if there are any unknown log features
5689                 * in what we need to recover. If there are unknown features
5690                 * (e.g. unsupported transactions, then simply reject the
5691                 * attempt at recovery before touching anything.
5692                 */
5693                if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5694                    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5695                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5696                        xfs_warn(log->l_mp,
5697"Superblock has unknown incompatible log features (0x%x) enabled.",
5698                                (log->l_mp->m_sb.sb_features_log_incompat &
5699                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5700                        xfs_warn(log->l_mp,
5701"The log can not be fully and/or safely recovered by this kernel.");
5702                        xfs_warn(log->l_mp,
5703"Please recover the log on a kernel that supports the unknown features.");
5704                        return -EINVAL;
5705                }
5706
5707                /*
5708                 * Delay log recovery if the debug hook is set. This is debug
5709                 * instrumention to coordinate simulation of I/O failures with
5710                 * log recovery.
5711                 */
5712                if (xfs_globals.log_recovery_delay) {
5713                        xfs_notice(log->l_mp,
5714                                "Delaying log recovery for %d seconds.",
5715                                xfs_globals.log_recovery_delay);
5716                        msleep(xfs_globals.log_recovery_delay * 1000);
5717                }
5718
5719                xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5720                                log->l_mp->m_logname ? log->l_mp->m_logname
5721                                                     : "internal");
5722
5723                error = xlog_do_recover(log, head_blk, tail_blk);
5724                log->l_flags |= XLOG_RECOVERY_NEEDED;
5725        }
5726        return error;
5727}
5728
5729/*
5730 * In the first part of recovery we replay inodes and buffers and build
5731 * up the list of extent free items which need to be processed.  Here
5732 * we process the extent free items and clean up the on disk unlinked
5733 * inode lists.  This is separated from the first part of recovery so
5734 * that the root and real-time bitmap inodes can be read in from disk in
5735 * between the two stages.  This is necessary so that we can free space
5736 * in the real-time portion of the file system.
5737 */
5738int
5739xlog_recover_finish(
5740        struct xlog     *log)
5741{
5742        /*
5743         * Now we're ready to do the transactions needed for the
5744         * rest of recovery.  Start with completing all the extent
5745         * free intent records and then process the unlinked inode
5746         * lists.  At this point, we essentially run in normal mode
5747         * except that we're still performing recovery actions
5748         * rather than accepting new requests.
5749         */
5750        if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5751                int     error;
5752                error = xlog_recover_process_intents(log);
5753                if (error) {
5754                        xfs_alert(log->l_mp, "Failed to recover intents");
5755                        return error;
5756                }
5757
5758                /*
5759                 * Sync the log to get all the intents out of the AIL.
5760                 * This isn't absolutely necessary, but it helps in
5761                 * case the unlink transactions would have problems
5762                 * pushing the intents out of the way.
5763                 */
5764                xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5765
5766                xlog_recover_process_iunlinks(log);
5767
5768                xlog_recover_check_summary(log);
5769
5770                xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5771                                log->l_mp->m_logname ? log->l_mp->m_logname
5772                                                     : "internal");
5773                log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5774        } else {
5775                xfs_info(log->l_mp, "Ending clean mount");
5776        }
5777        return 0;
5778}
5779
5780void
5781xlog_recover_cancel(
5782        struct xlog     *log)
5783{
5784        if (log->l_flags & XLOG_RECOVERY_NEEDED)
5785                xlog_recover_cancel_intents(log);
5786}
5787
5788#if defined(DEBUG)
5789/*
5790 * Read all of the agf and agi counters and check that they
5791 * are consistent with the superblock counters.
5792 */
5793STATIC void
5794xlog_recover_check_summary(
5795        struct xlog     *log)
5796{
5797        xfs_mount_t     *mp;
5798        xfs_agf_t       *agfp;
5799        xfs_buf_t       *agfbp;
5800        xfs_buf_t       *agibp;
5801        xfs_agnumber_t  agno;
5802        uint64_t        freeblks;
5803        uint64_t        itotal;
5804        uint64_t        ifree;
5805        int             error;
5806
5807        mp = log->l_mp;
5808
5809        freeblks = 0LL;
5810        itotal = 0LL;
5811        ifree = 0LL;
5812        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5813                error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5814                if (error) {
5815                        xfs_alert(mp, "%s agf read failed agno %d error %d",
5816                                                __func__, agno, error);
5817                } else {
5818                        agfp = XFS_BUF_TO_AGF(agfbp);
5819                        freeblks += be32_to_cpu(agfp->agf_freeblks) +
5820                                    be32_to_cpu(agfp->agf_flcount);
5821                        xfs_buf_relse(agfbp);
5822                }
5823
5824                error = xfs_read_agi(mp, NULL, agno, &agibp);
5825                if (error) {
5826                        xfs_alert(mp, "%s agi read failed agno %d error %d",
5827                                                __func__, agno, error);
5828                } else {
5829                        struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
5830
5831                        itotal += be32_to_cpu(agi->agi_count);
5832                        ifree += be32_to_cpu(agi->agi_freecount);
5833                        xfs_buf_relse(agibp);
5834                }
5835        }
5836}
5837#endif /* DEBUG */
5838