linux/fs/xfs/xfs_log_recover.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_ag.h"
#include "xfs_quota.h"


#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)
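/*
 * Worked example (illustrative): BLK_AVG(3, 8) = (3 + 8) >> 1 = 5, the
 * integer midpoint used by the binary searches below.
 */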

STATIC int
xlog_find_zeroed(
        struct xlog     *,
        xfs_daddr_t     *);
STATIC int
xlog_clear_stale_blocks(
        struct xlog     *,
        xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
        struct xlog *);
#else
#define xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             bbcount)
{
        if (blk_no < 0 || blk_no >= log->l_logBBsize)
                return false;
        if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
                return false;
        return true;
}
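/*
 * Worked example (illustrative, assuming l_logBBsize = 1000): blk_no = 999
 * with bbcount = 1 is valid, while blk_no = 999 with bbcount = 2 is
 * rejected because the range would run one block past the end of the log.
 */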

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
        struct xlog     *log,
        int             nbblks)
{
        /*
         * Pass log block 0 since we don't have an addr yet, buffer will be
         * verified on read.
         */
        if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2 multiple of the
         * basic block size), so we round up the requested size to accommodate
         * the basic blocks required for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-aligned block
         * offset, in which case an I/O of the requested size could extend
         * beyond the end of the buffer.  If the requested size is only 1 basic
         * block it will never straddle a sector boundary, so this won't be an
         * issue.  Nor will this be a problem if the log I/O is done in basic
         * blocks (sector size 1).  But otherwise we extend the buffer by one
         * extra log sector to ensure there's space to accommodate this
         * possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
        return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
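/*
 * Worked example (illustrative, assuming 4k log sectors, l_sectBBsize = 8):
 * a request for nbblks = 3 is first bumped to 11 to cover a possible
 * non-sector-aligned mapping, then rounded up to 16 basic blocks, for a
 * BBTOB(16) = 8k allocation.
 */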

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
        struct xlog     *log,
        xfs_daddr_t     blk_no)
{
        return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
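/*
 * Worked example (illustrative, assuming l_sectBBsize = 8): block 13 lies
 * 13 & 7 = 5 basic blocks into its sector-aligned buffer, so xlog_align()
 * returns BBTOB(5) = 2560 bytes.
 */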

static int
xlog_do_io(
        struct xlog             *log,
        xfs_daddr_t             blk_no,
        unsigned int            nbblks,
        char                    *data,
        unsigned int            op)
{
        int                     error;

        if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
                xfs_warn(log->l_mp,
                         "Invalid log block/length (0x%llx, 0x%x) for buffer",
                         blk_no, nbblks);
                return -EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);
        ASSERT(nbblks > 0);

        error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
                        BBTOB(nbblks), data, op);
        if (error && !xlog_is_shutdown(log)) {
                xfs_alert(log->l_mp,
                          "log recovery %s I/O error at daddr 0x%llx len %d error %d",
                          op == REQ_OP_WRITE ? "write" : "read",
                          blk_no, nbblks, error);
        }
        return error;
}
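/*
 * Worked example (illustrative, assuming l_sectBBsize = 8): a single-block
 * read of block 13 is widened to the sector-aligned span of blocks 8-15;
 * the caller then finds block 13's data at xlog_align() bytes into the
 * buffer, as xlog_bread() below does.
 */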

STATIC int
xlog_bread_noalign(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data,
        char            **offset)
{
        int             error;

        error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
        if (!error)
                *offset = data + xlog_align(log, blk_no);
        return error;
}

STATIC int
xlog_bwrite(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
                xfs_warn(mp,
        "dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                                           &head->h_fs_uuid))) {
                xfs_warn(mp,
        "dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        if (uuid_is_null(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is null, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xfs_warn(mp, "null uuid in log - IRIX style log");
        } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                                                  &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be exact because the contents of the
 * on-disk log are not guaranteed to be consistent.
 */
STATIC int
xlog_find_cycle_start(
        struct xlog     *log,
        char            *buffer,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        char            *offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, buffer, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
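/*
 * Worked example (illustrative): with per-block cycles { 2, 2, 2, 1, 1, 1,
 * 1, 1 } in blocks 0-7, a search for cycle 1 with first_blk = 0 and
 * *last_blk = 7 probes block 3 (cycle 1, end_blk = 3), block 1 (cycle 2,
 * first_blk = 1), then block 2 (cycle 2, first_blk = 2), and returns
 * *last_blk = 3: the first block stamped with the new cycle.
 */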

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        struct xlog     *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        char            *buffer;
        xfs_daddr_t     bufblks;
        char            *buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (bufblks > log->l_logBBsize)
                bufblks >>= 1;
        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return -ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, buffer, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        kmem_free(buffer);
        return error;
}
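/*
 * Buffer sizing example (illustrative): scanning nbblks = 2048 blocks
 * starts with bufblks = 1 << ffs(2048) = 4096; each failed allocation
 * halves that, and we give up with -ENOMEM once bufblks drops below one
 * log sector.
 */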

static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
        if (xfs_has_logv2(log->l_mp)) {
                int     h_size = be32_to_cpu(rh->h_size);

                if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
                    h_size > XLOG_HEADER_CYCLE_SIZE)
                        return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
        }
        return 1;
}
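/*
 * Worked example (illustrative): a v2 log using 64k iclogs (h_size =
 * 65536) needs DIV_ROUND_UP(65536, 32768) = 2 header blocks, since
 * XLOG_HEADER_CYCLE_SIZE is 32k; v1 logs and v2 logs with h_size <= 32k
 * use a single header block.
 */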

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        struct xlog             *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        char                    *buffer;
        char                    *offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        buffer = xlog_alloc_buffer(log, num_blks);
        if (!buffer) {
                buffer = xlog_alloc_buffer(log, 1);
                if (!buffer)
                        return -ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xfs_warn(log->l_mp,
                "Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = -EFSCORRUPTED;
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = 1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block of the
         * log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        xhdrs = xlog_logrec_hblks(log, head);

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        kmem_free(buffer);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        struct xlog     *log,
        xfs_daddr_t     *return_head_blk)
{
        char            *buffer;
        char            *offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        error = xlog_find_zeroed(log, &first_blk);
        if (error < 0) {
                xfs_warn(log->l_mp, "empty log check failed");
                return error;
        }
        if (error == 1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xfs_warn(log->l_mp, "totally zeroed log");
                }

                return 0;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        error = xlog_bread(log, 0, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
                                last_half_cycle);
                if (error)
                        goto out_free_buffer;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error == 1)
                        error = -EIO;
                if (error)
                        goto out_free_buffer;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error < 0)
                        goto out_free_buffer;
                if (error == 1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        error = xlog_find_verify_log_record(log, start_blk,
                                                        &new_blk, (int)head_blk);
                        if (error == 1)
                                error = -EIO;
                        if (error)
                                goto out_free_buffer;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto out_free_buffer;
        }

        kmem_free(buffer);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

out_free_buffer:
        kmem_free(buffer);
        if (error)
                xfs_warn(log->l_mp, "failed to find log head");
        return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk backwards from the head block until we hit the tail or the first
         * block in the log.
         */
        end_blk = head_blk > tail_blk ? tail_blk : 0;
        for (i = (int) head_blk - 1; i >= end_blk; i--) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the tail block or the log record header count,
         * start looking again from the end of the physical log. Note that
         * callers can pass head == tail if the tail is not yet known.
         */
        if (tail_blk >= head_blk && found != count) {
                for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}
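/*
 * Worked example (illustrative, assuming l_logBBsize = 1000): with
 * head_blk = 50 and tail_blk = 900, the first loop scans blocks 49 down to
 * 0; if fewer than count records were found, the second loop resumes at
 * block 999 and walks down to the tail at 900 with *wrapped set to true.
 */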

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk forward from the tail block until we hit the head or the last
         * block in the log.
         */
        end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
        for (i = (int) tail_blk; i <= end_blk; i++) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the head block or the log record header count,
         * start looking again from the start of the physical log.
         */
        if (tail_blk > head_blk && found != count) {
                for (i = 0; i < (int) head_blk; i++) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
        struct xlog     *log,
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
{
        if (head_blk < tail_blk)
                return tail_blk - head_blk;

        return tail_blk + (log->l_logBBsize - head_blk);
}
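/*
 * Worked example (illustrative, assuming l_logBBsize = 1000): with
 * head_blk = 900 and tail_blk = 100 the head has wrapped, so the unused
 * span is 100 + (1000 - 900) = 200 blocks.
 */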

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             *tail_blk,
        int                     hsize)
{
        struct xlog_rec_header  *thead;
        char                    *buffer;
        xfs_daddr_t             first_bad;
        int                     error = 0;
        bool                    wrapped;
        xfs_daddr_t             tmp_tail;
        xfs_daddr_t             orig_tail = *tail_blk;

        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        /*
         * Make sure the tail points to a record (returns positive count on
         * success).
         */
        error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
                        &tmp_tail, &thead, &wrapped);
        if (error < 0)
                goto out;
        if (*tail_blk != tmp_tail)
                *tail_blk = tmp_tail;

        /*
         * Run a CRC check from the tail to the head. We can't just check
         * MAX_ICLOGS records past the tail because the tail may point to stale
         * blocks cleared during the search for the head/tail. These blocks are
         * overwritten with zero-length records and thus record count is not a
         * reliable indicator of the iclog state before a crash.
         */
        first_bad = 0;
        error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                int     tail_distance;

                /*
                 * Is corruption within range of the head? If so, retry from
                 * the next record. Otherwise return an error.
                 */
                tail_distance = xlog_tail_distance(log, head_blk, first_bad);
                if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
                        break;

                /* skip to the next record; returns positive count on success */
                error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
                                buffer, &tmp_tail, &thead, &wrapped);
                if (error < 0)
                        goto out;

                *tail_blk = tmp_tail;
                first_bad = 0;
                error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                              XLOG_RECOVER_CRCPASS, &first_bad);
        }

        if (!error && *tail_blk != orig_tail)
                xfs_warn(log->l_mp,
                "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
                         orig_tail, *tail_blk);
out:
        kmem_free(buffer);
        return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,      /* in/out: unverified head */
        xfs_daddr_t             *tail_blk,      /* out: tail block */
        char                    *buffer,
        xfs_daddr_t             *rhead_blk,     /* start blk of last record */
        struct xlog_rec_header  **rhead,        /* ptr to last record */
        bool                    *wrapped)       /* last rec. wraps phys. log */
{
        struct xlog_rec_header  *tmp_rhead;
        char                    *tmp_buffer;
        xfs_daddr_t             first_bad;
        xfs_daddr_t             tmp_rhead_blk;
        int                     found;
        int                     error;
        bool                    tmp_wrapped;

        /*
         * Check the head of the log for torn writes. Search backwards from the
         * head until we hit the tail or the maximum number of log record I/Os
         * that could have been in flight at one time. Use a temporary buffer so
         * we don't trash the rhead/buffer pointers from the caller.
         */
        tmp_buffer = xlog_alloc_buffer(log, 1);
        if (!tmp_buffer)
                return -ENOMEM;
        error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
                                      XLOG_MAX_ICLOGS, tmp_buffer,
                                      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
        kmem_free(tmp_buffer);
        if (error < 0)
                return error;

        /*
         * Now run a CRC verification pass over the records starting at the
         * block found above to the current head. If a CRC failure occurs, the
         * log block of the first bad record is saved in first_bad.
         */
        error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                /*
                 * We've hit a potential torn write. Reset the error and warn
                 * about it.
                 */
                error = 0;
                xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
                         first_bad, *head_blk);

                /*
                 * Get the header block and buffer pointer for the last good
                 * record before the bad record.
                 *
                 * Note that xlog_find_tail() clears the blocks at the new head
                 * (i.e., the records with invalid CRC) if the cycle number
                 * matches the current cycle.
                 */
                found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
                                buffer, rhead_blk, rhead, wrapped);
                if (found < 0)
                        return found;
                if (found == 0)         /* XXX: right thing to do here? */
                        return -EIO;

                /*
                 * Reset the head block to the starting block of the first bad
                 * log record and set the tail block based on the last good
                 * record.
                 *
                 * Bail out if the updated head/tail match as this indicates
                 * possible corruption outside of the acceptable
                 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
                 */
                *head_blk = first_bad;
                *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
                if (*head_blk == *tail_blk) {
                        ASSERT(0);
                        return 0;
                }
        }
        if (error)
                return error;

        return xlog_verify_tail(log, *head_blk, tail_blk,
                                be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
        struct xlog             *log,
        xfs_daddr_t             bno)
{
        int                     mod;

        div_s64_rem(bno, log->l_logBBsize, &mod);
        return mod;
}
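/*
 * Worked example (illustrative, assuming l_logBBsize = 1000): a computed
 * block number of 1005 wraps to 5, while an in-range value such as 42 is
 * returned unchanged.
 */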

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk,
        struct xlog_rec_header  *rhead,
        xfs_daddr_t             rhead_blk,
        char                    *buffer,
        bool                    *clean)
{
        struct xlog_op_header   *op_head;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        int                     hblks;
        int                     error;
        char                    *offset;

        *clean = false;

        /*
         * Look for unmount record. If we find it, then we know there was a
         * clean unmount. Since 'i' could be the last block in the physical
         * log, we convert to a log block before comparing to the head_blk.
         *
         * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
         * below. We won't want to clear the unmount record if there is one, so
         * we pass the lsn of the unmount record rather than the block after it.
         */
        hblks = xlog_logrec_hblks(log, rhead);
        after_umount_blk = xlog_wrap_logbno(log,
                        rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
                error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
                if (error)
                        return error;

                op_head = (struct xlog_op_header *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written log
                         * records will point recovery to after the current
                         * unmount record.
                         */
                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        *tail_blk = after_umount_blk;

                        *clean = true;
                }
        }

        return 0;
}

static void
xlog_set_state(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        struct xlog_rec_header  *rhead,
        xfs_daddr_t             rhead_blk,
        bool                    bump_cycle)
{
        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = rhead_blk;
        log->l_curr_block = (int)head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (bump_cycle)
                log->l_curr_cycle++;
        atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
        atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
        xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
        xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
1244 */
1245STATIC int
1246xlog_find_tail(
1247        struct xlog             *log,
1248        xfs_daddr_t             *head_blk,
1249        xfs_daddr_t             *tail_blk)
1250{
1251        xlog_rec_header_t       *rhead;
1252        char                    *offset = NULL;
1253        char                    *buffer;
1254        int                     error;
1255        xfs_daddr_t             rhead_blk;
1256        xfs_lsn_t               tail_lsn;
1257        bool                    wrapped = false;
1258        bool                    clean = false;
1259
1260        /*
1261         * Find previous log record
1262         */
1263        if ((error = xlog_find_head(log, head_blk)))
1264                return error;
1265        ASSERT(*head_blk < INT_MAX);
1266
1267        buffer = xlog_alloc_buffer(log, 1);
1268        if (!buffer)
1269                return -ENOMEM;
1270        if (*head_blk == 0) {                           /* special case */
1271                error = xlog_bread(log, 0, 1, buffer, &offset);
1272                if (error)
1273                        goto done;
1274
1275                if (xlog_get_cycle(offset) == 0) {
1276                        *tail_blk = 0;
1277                        /* leave all other log inited values alone */
1278                        goto done;
1279                }
1280        }
1281
1282        /*
1283         * Search backwards through the log looking for the log record header
1284         * block. This wraps all the way back around to the head so something is
1285         * seriously wrong if we can't find it.
1286         */
1287        error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1288                                      &rhead_blk, &rhead, &wrapped);
1289        if (error < 0)
1290                goto done;
1291        if (!error) {
1292                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1293                error = -EFSCORRUPTED;
1294                goto done;
1295        }
1296        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1297
1298        /*
1299         * Set the log state based on the current head record.
1300         */
1301        xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1302        tail_lsn = atomic64_read(&log->l_tail_lsn);
1303
1304        /*
1305         * Look for an unmount record at the head of the log. This sets the log
1306         * state to determine whether recovery is necessary.
1307         */
1308        error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1309                                       rhead_blk, buffer, &clean);
1310        if (error)
1311                goto done;
1312
1313        /*
1314         * Verify the log head if the log is not clean (e.g., we have anything
1315         * but an unmount record at the head). This uses CRC verification to
1316         * detect and trim torn writes. If discovered, CRC failures are
1317         * considered torn writes and the log head is trimmed accordingly.
1318         *
1319         * Note that we can only run CRC verification when the log is dirty
1320         * because there's no guarantee that the log data behind an unmount
1321         * record is compatible with the current architecture.
1322         */
1323        if (!clean) {
1324                xfs_daddr_t     orig_head = *head_blk;
1325
1326                error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1327                                         &rhead_blk, &rhead, &wrapped);
1328                if (error)
1329                        goto done;
1330
1331                /* update in-core state again if the head changed */
1332                if (*head_blk != orig_head) {
1333                        xlog_set_state(log, *head_blk, rhead, rhead_blk,
1334                                       wrapped);
1335                        tail_lsn = atomic64_read(&log->l_tail_lsn);
1336                        error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1337                                                       rhead, rhead_blk, buffer,
1338                                                       &clean);
1339                        if (error)
1340                                goto done;
1341                }
1342        }
1343
1344        /*
1345         * Note that the unmount was clean. If the unmount was not clean, we
1346         * need to know this to rebuild the superblock counters from the perag
1347         * headers if we have a filesystem using non-persistent counters.
1348         */
1349        if (clean)
1350                set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);
1351
1352        /*
1353         * Make sure that there are no blocks in front of the head
1354         * with the same cycle number as the head.  This can happen
1355         * because we allow multiple outstanding log writes concurrently,
1356         * and the later writes might make it out before earlier ones.
1357         *
1358         * We use the lsn from before modifying it so that we'll never
1359         * overwrite the unmount record after a clean unmount.
1360         *
1361         * Do this only if we are going to recover the filesystem
1362         *
1363         * NOTE: This used to say "if (!readonly)"
1364         * However on Linux, we can & do recover a read-only filesystem.
1365         * We only skip recovery if NORECOVERY is specified on mount,
1366         * in which case we would not be here.
1367         *
1368         * But... if the -device- itself is readonly, just skip this.
1369         * We can't recover this device anyway, so it won't matter.
1370         */
1371        if (!xfs_readonly_buftarg(log->l_targ))
1372                error = xlog_clear_stale_blocks(log, tail_lsn);
1373
1374done:
1375        kmem_free(buffer);
1376
1377        if (error)
1378                xfs_warn(log->l_mp, "failed to locate log tail");
1379        return error;
1380}
1381
1382/*
1383 * Is the log zeroed at all?
1384 *
1385 * The last binary search should be changed to perform an X block read
1386 * once X becomes small enough.  You can then search linearly through
1387 * the X blocks.  This will cut down on the number of reads we need to do.
1388 *
1389 * If the log is partially zeroed, this routine will pass back the blkno
1390 * of the first block with cycle number 0.  It won't have a complete LR
1391 * preceding it.
1392 *
1393 * Return:
1394 *      0  => the log is completely written to
1395 *      1  => use *blk_no as the first block of the log
1396 *      <0 => error has occurred
1397 */
1398STATIC int
1399xlog_find_zeroed(
1400        struct xlog     *log,
1401        xfs_daddr_t     *blk_no)
1402{
1403        char            *buffer;
1404        char            *offset;
1405        uint            first_cycle, last_cycle;
1406        xfs_daddr_t     new_blk, last_blk, start_blk;
1407        xfs_daddr_t     num_scan_bblks;
1408        int             error, log_bbnum = log->l_logBBsize;
1409
1410        *blk_no = 0;
1411
1412        /* check totally zeroed log */
1413        buffer = xlog_alloc_buffer(log, 1);
1414        if (!buffer)
1415                return -ENOMEM;
1416        error = xlog_bread(log, 0, 1, buffer, &offset);
1417        if (error)
1418                goto out_free_buffer;
1419
1420        first_cycle = xlog_get_cycle(offset);
1421        if (first_cycle == 0) {         /* completely zeroed log */
1422                *blk_no = 0;
1423                kmem_free(buffer);
1424                return 1;
1425        }
1426
1427        /* check partially zeroed log */
1428        error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1429        if (error)
1430                goto out_free_buffer;
1431
1432        last_cycle = xlog_get_cycle(offset);
1433        if (last_cycle != 0) {          /* log completely written to */
1434                kmem_free(buffer);
1435                return 0;
1436        }
1437
1438        /* we have a partially zeroed log */
1439        last_blk = log_bbnum-1;
1440        error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1441        if (error)
1442                goto out_free_buffer;
1443
1444        /*
1445         * Validate the answer.  Because there is no way to guarantee that
1446         * the entire log is made up of log records which are the same size,
1447         * we scan over the defined maximum blocks.  At this point, the maximum
1448         * is not chosen to mean anything special.   XXXmiken
1449         */
1450        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1451        ASSERT(num_scan_bblks <= INT_MAX);
1452
1453        if (last_blk < num_scan_bblks)
1454                num_scan_bblks = last_blk;
1455        start_blk = last_blk - num_scan_bblks;
1456
1457        /*
1458         * We search for any instances of cycle number 0 that occur before
1459         * our current estimate of the head.  What we're trying to detect is
1460         *        1 ... | 0 | 1 | 0...
1461         *                       ^ binary search ends here
1462         */
1463        if ((error = xlog_find_verify_cycle(log, start_blk,
1464                                         (int)num_scan_bblks, 0, &new_blk)))
1465                goto out_free_buffer;
1466        if (new_blk != -1)
1467                last_blk = new_blk;
1468
1469        /*
1470         * Potentially backup over partial log record write.  We don't need
1471         * to search the end of the log because we know it is zero.
1472         */
1473        error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1474        if (error == 1)
1475                error = -EIO;
1476        if (error)
1477                goto out_free_buffer;
1478
1479        *blk_no = last_blk;
1480out_free_buffer:
1481        kmem_free(buffer);
1482        if (error)
1483                return error;
1484        return 1;
1485}
1486
1487/*
1488 * These are simple subroutines used by xlog_clear_stale_blocks() below
1489 * to initialize a buffer full of empty log record headers and write
1490 * them into the log.
1491 */
1492STATIC void
1493xlog_add_record(
1494        struct xlog             *log,
1495        char                    *buf,
1496        int                     cycle,
1497        int                     block,
1498        int                     tail_cycle,
1499        int                     tail_block)
1500{
1501        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;
1502
1503        memset(buf, 0, BBSIZE);
1504        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1505        recp->h_cycle = cpu_to_be32(cycle);
1506        recp->h_version = cpu_to_be32(
1507                        xfs_has_logv2(log->l_mp) ? 2 : 1);
1508        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1509        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1510        recp->h_fmt = cpu_to_be32(XLOG_FMT);
1511        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1512}
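    /*
     * Worked example (illustrative numbers only): xlog_add_record(log, buf,
     * 7, 900, 7, 100) stamps buf with h_lsn = (7 << 32) | 900 and
     * h_tail_lsn = (7 << 32) | 100, i.e. an empty record at block 900 of
     * cycle 7 whose tail points at block 100 of the same cycle.
     */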
1513
1514STATIC int
1515xlog_write_log_records(
1516        struct xlog     *log,
1517        int             cycle,
1518        int             start_block,
1519        int             blocks,
1520        int             tail_cycle,
1521        int             tail_block)
1522{
1523        char            *offset;
1524        char            *buffer;
1525        int             balign, ealign;
1526        int             sectbb = log->l_sectBBsize;
1527        int             end_block = start_block + blocks;
1528        int             bufblks;
1529        int             error = 0;
1530        int             i, j = 0;
1531
1532        /*
1533         * Greedily allocate a buffer big enough to handle the full
1534         * range of basic blocks to be written.  If that fails, try
1535         * a smaller size.  We need to be able to write at least a
1536         * log sector, or we're out of luck.
1537         */
1538        bufblks = 1 << ffs(blocks);
1539        while (bufblks > log->l_logBBsize)
1540                bufblks >>= 1;
1541        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1542                bufblks >>= 1;
1543                if (bufblks < sectbb)
1544                        return -ENOMEM;
1545        }
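            /*
             * Sizing note: ffs() is 1-based, so blocks = 12 (0b1100) starts
             * with bufblks = 1 << 3 = 8, while any odd block count starts at
             * just 1 << 1 = 2 and relies on the loop below to cover the
             * range in more passes.  Either way bufblks is a power of two no
             * smaller than one log sector.
             */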
1546
1547        /* We may need to do a read at the start to fill in part of
1548         * the buffer in the starting sector not covered by the first
1549         * write below.
1550         */
1551        balign = round_down(start_block, sectbb);
1552        if (balign != start_block) {
1553                error = xlog_bread_noalign(log, start_block, 1, buffer);
1554                if (error)
1555                        goto out_free_buffer;
1556
1557                j = start_block - balign;
1558        }
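            /*
             * j is now the offset of start_block within its log sector; the
             * blocks before it were just read back from disk and will be
             * rewritten unmodified.
             */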
1559
1560        for (i = start_block; i < end_block; i += bufblks) {
1561                int             bcount, endcount;
1562
1563                bcount = min(bufblks, end_block - start_block);
1564                endcount = bcount - j;
1565
1566                /* We may need to do a read at the end to fill in part of
1567                 * the buffer in the final sector not covered by the write.
1568                 * If this is the same sector as the above read, skip it.
1569                 */
1570                ealign = round_down(end_block, sectbb);
1571                if (j == 0 && (start_block + endcount > ealign)) {
1572                        error = xlog_bread_noalign(log, ealign, sectbb,
1573                                        buffer + BBTOB(ealign - start_block));
1574                        if (error)
1575                                break;
1576
1577                }
1578
1579                offset = buffer + xlog_align(log, start_block);
1580                for (; j < endcount; j++) {
1581                        xlog_add_record(log, offset, cycle, i+j,
1582                                        tail_cycle, tail_block);
1583                        offset += BBSIZE;
1584                }
1585                error = xlog_bwrite(log, start_block, endcount, buffer);
1586                if (error)
1587                        break;
1588                start_block += endcount;
1589                j = 0;
1590        }
1591
1592out_free_buffer:
1593        kmem_free(buffer);
1594        return error;
1595}
1596
1597/*
1598 * This routine is called to blow away any incomplete log writes out
1599 * in front of the log head.  We do this so that we won't become confused
1600 * if we come up, write only a little bit more, and then crash again.
1601 * If we leave the partial log records out there, this situation could
1602 * cause us to think those partial writes are valid blocks since they
1603 * have the current cycle number.  We get rid of them by overwriting them
1604 * with empty log records with the old cycle number rather than the
1605 * current one.
1606 *
1607 * The tail lsn is passed in rather than taken from
1608 * the log so that we will not write over the unmount record after a
1609 * clean unmount in a 512 block log.  Doing so would leave the log without
1610 * any valid log records in it until a new one was written.  If we crashed
1611 * during that time we would not be able to recover.
1612 */
1613STATIC int
1614xlog_clear_stale_blocks(
1615        struct xlog     *log,
1616        xfs_lsn_t       tail_lsn)
1617{
1618        int             tail_cycle, head_cycle;
1619        int             tail_block, head_block;
1620        int             tail_distance, max_distance;
1621        int             distance;
1622        int             error;
1623
1624        tail_cycle = CYCLE_LSN(tail_lsn);
1625        tail_block = BLOCK_LSN(tail_lsn);
1626        head_cycle = log->l_curr_cycle;
1627        head_block = log->l_curr_block;
1628
1629        /*
1630         * Figure out the distance between the new head of the log
1631         * and the tail.  We want to write over any blocks beyond the
1632         * head that we may have written just before the crash, but
1633         * we don't want to overwrite the tail of the log.
1634         */
1635        if (head_cycle == tail_cycle) {
1636                /*
1637                 * The tail is behind the head in the physical log,
1638                 * so the distance from the head to the tail is the
1639                 * distance from the head to the end of the log plus
1640                 * the distance from the beginning of the log to the
1641                 * tail.
1642                 */
1643                if (XFS_IS_CORRUPT(log->l_mp,
1644                                   head_block < tail_block ||
1645                                   head_block >= log->l_logBBsize))
1646                        return -EFSCORRUPTED;
1647                tail_distance = tail_block + (log->l_logBBsize - head_block);
1648        } else {
1649                /*
1650                 * The head is behind the tail in the physical log,
1651                 * so the distance from the head to the tail is just
1652                 * the tail block minus the head block.
1653                 */
1654                if (XFS_IS_CORRUPT(log->l_mp,
1655                                   head_block >= tail_block ||
1656                                   head_cycle != tail_cycle + 1))
1657                        return -EFSCORRUPTED;
1658                tail_distance = tail_block - head_block;
1659        }
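            /*
             * Worked example (illustrative numbers): in a 1000 block log
             * with head_block = 900 and tail_block = 100 in the same cycle,
             * tail_distance = 100 + (1000 - 900) = 200.  With head_cycle ==
             * tail_cycle + 1, head_block = 100 and tail_block = 900 give
             * tail_distance = 900 - 100 = 800.
             */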
1660
1661        /*
1662         * If the head is right up against the tail, we can't clear
1663         * anything.
1664         */
1665        if (tail_distance <= 0) {
1666                ASSERT(tail_distance == 0);
1667                return 0;
1668        }
1669
1670        max_distance = XLOG_TOTAL_REC_SHIFT(log);
1671        /*
1672         * Take the smaller of the maximum amount of outstanding I/O
1673         * we could have and the distance to the tail to clear out.
1674         * We take the smaller so that we don't overwrite the tail and
1675         * we don't waste all day writing from the head to the tail
1676         * for no reason.
1677         */
1678        max_distance = min(max_distance, tail_distance);
1679
1680        if ((head_block + max_distance) <= log->l_logBBsize) {
1681                /*
1682                 * We can stomp all the blocks we need to without
1683                 * wrapping around the end of the log.  Just do it
1684                 * in a single write.  Use the cycle number of the
1685                 * current cycle minus one so that the log will look like:
1686                 *     n ... | n - 1 ...
1687                 */
1688                error = xlog_write_log_records(log, (head_cycle - 1),
1689                                head_block, max_distance, tail_cycle,
1690                                tail_block);
1691                if (error)
1692                        return error;
1693        } else {
1694                /*
1695                 * We need to wrap around the end of the physical log in
1696                 * order to clear all the blocks.  Do it in two separate
1697                 * I/Os.  The first write should be from the head to the
1698                 * end of the physical log, and it should use the current
1699                 * cycle number minus one just like above.
1700                 */
1701                distance = log->l_logBBsize - head_block;
1702                error = xlog_write_log_records(log, (head_cycle - 1),
1703                                head_block, distance, tail_cycle,
1704                                tail_block);
1705
1706                if (error)
1707                        return error;
1708
1709                /*
1710                 * Now write the blocks at the start of the physical log.
1711                 * This writes the remainder of the blocks we want to clear.
1712                 * It uses the current cycle number since we're now on the
1713                 * same cycle as the head so that we get:
1714                 *    n ... n ... | n - 1 ...
1715                 *    ^^^^^ blocks we're writing
1716                 */
1717                distance = max_distance - (log->l_logBBsize - head_block);
1718                error = xlog_write_log_records(log, head_cycle, 0, distance,
1719                                tail_cycle, tail_block);
1720                if (error)
1721                        return error;
1722        }
1723
1724        return 0;
1725}
1726
1727/*
1728 * Release the recovered intent item in the AIL that matches the given intent
1729 * type and intent id.
1730 */
1731void
1732xlog_recover_release_intent(
1733        struct xlog             *log,
1734        unsigned short          intent_type,
1735        uint64_t                intent_id)
1736{
1737        struct xfs_ail_cursor   cur;
1738        struct xfs_log_item     *lip;
1739        struct xfs_ail          *ailp = log->l_ailp;
1740
1741        spin_lock(&ailp->ail_lock);
1742        for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1743             lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1744                if (lip->li_type != intent_type)
1745                        continue;
1746                if (!lip->li_ops->iop_match(lip, intent_id))
1747                        continue;
1748
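                    /*
                     * ->iop_release() may itself take the AIL lock (e.g. to
                     * remove the intent from the AIL when the last reference
                     * goes away), so drop the lock around the call; the
                     * cursor holds our place in the list while it is
                     * unlocked.
                     */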
1749                spin_unlock(&ailp->ail_lock);
1750                lip->li_ops->iop_release(lip);
1751                spin_lock(&ailp->ail_lock);
1752                break;
1753        }
1754
1755        xfs_trans_ail_cursor_done(&cur);
1756        spin_unlock(&ailp->ail_lock);
1757}
1758
1759int
1760xlog_recover_iget(
1761        struct xfs_mount        *mp,
1762        xfs_ino_t               ino,
1763        struct xfs_inode        **ipp)
1764{
1765        int                     error;
1766
1767        error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
1768        if (error)
1769                return error;
1770
1771        error = xfs_qm_dqattach(*ipp);
1772        if (error) {
1773                xfs_irele(*ipp);
1774                return error;
1775        }
1776
1777        if (VFS_I(*ipp)->i_nlink == 0)
1778                xfs_iflags_set(*ipp, XFS_IRECOVERY);
1779
1780        return 0;
1781}
1782
1783/******************************************************************************
1784 *
1785 *              Log recover routines
1786 *
1787 ******************************************************************************
1788 */
1789static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1790        &xlog_buf_item_ops,
1791        &xlog_inode_item_ops,
1792        &xlog_dquot_item_ops,
1793        &xlog_quotaoff_item_ops,
1794        &xlog_icreate_item_ops,
1795        &xlog_efi_item_ops,
1796        &xlog_efd_item_ops,
1797        &xlog_rui_item_ops,
1798        &xlog_rud_item_ops,
1799        &xlog_cui_item_ops,
1800        &xlog_cud_item_ops,
1801        &xlog_bui_item_ops,
1802        &xlog_bud_item_ops,
1803};
1804
1805static const struct xlog_recover_item_ops *
1806xlog_find_item_ops(
1807        struct xlog_recover_item                *item)
1808{
1809        unsigned int                            i;
1810
1811        for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1812                if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1813                        return xlog_recover_item_ops[i];
1814
1815        return NULL;
1816}
1817
1818/*
1819 * Sort the log items in the transaction.
1820 *
1821 * The ordering constraints are defined by the inode allocation and unlink
1822 * behaviour. The rules are:
1823 *
1824 *      1. Every item is only logged once in a given transaction. Hence it
1825 *         represents the last logged state of the item. Hence ordering is
1826 *         dependent on the order in which operations need to be performed so
1827 *         that required initial conditions are always met.
1828 *
1829 *      2. Cancelled buffers are recorded in pass 1 in a separate table and
1830 *         there's nothing to replay from them so we can simply cull them
1831 *         from the transaction. However, we can't do that until after we've
1832 *         replayed all the other items because they may be dependent on the
1833 *         cancelled buffer and replaying the cancelled buffer can remove it
1834 *         from the cancelled buffer table. Hence they have to be done last.
1835 *
1836 *      3. Inode allocation buffers must be replayed before inode items that
1837 *         read the buffer and replay changes into it. For filesystems using the
1838 *         ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1839 *         treated the same as inode allocation buffers as they create and
1840 *         initialise the buffers directly.
1841 *
1842 *      4. Inode unlink buffers must be replayed after inode items are replayed.
1843 *         This ensures that inodes are completely flushed to the inode buffer
1844 *         in a "free" state before we remove the unlinked inode list pointer.
1845 *
1846 * Hence the ordering needs to be inode allocation buffers first, inode items
1847 * second, inode unlink buffers third and cancelled buffers last.
1848 *
1849 * But there's a problem with that - we can't tell an inode allocation buffer
1850 * apart from a regular buffer, so we can't separate them. We can, however,
1851 * tell an inode unlink buffer from the others, and so we can separate them out
1852 * from all the other buffers and move them last.
1853 *
1854 * Hence, 4 lists, in order from head to tail:
1855 *      - buffer_list for all buffers except cancelled/inode unlink buffers
1856 *      - item_list for all non-buffer items
1857 *      - inode_buffer_list for inode unlink buffers
1858 *      - cancel_list for the cancelled buffers
1859 *
1860 * Note that we add objects to the tail of the lists so that first-to-last
1861 * ordering is preserved within the lists. Adding objects to the head of the
1862 * list means when we traverse from the head we walk them in last-to-first
1863 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1864 * but for all other items there may be specific ordering that we need to
1865 * preserve.
1866 */
1867STATIC int
1868xlog_recover_reorder_trans(
1869        struct xlog             *log,
1870        struct xlog_recover     *trans,
1871        int                     pass)
1872{
1873        struct xlog_recover_item *item, *n;
1874        int                     error = 0;
1875        LIST_HEAD(sort_list);
1876        LIST_HEAD(cancel_list);
1877        LIST_HEAD(buffer_list);
1878        LIST_HEAD(inode_buffer_list);
1879        LIST_HEAD(item_list);
1880
1881        list_splice_init(&trans->r_itemq, &sort_list);
1882        list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1883                enum xlog_recover_reorder       fate = XLOG_REORDER_ITEM_LIST;
1884
1885                item->ri_ops = xlog_find_item_ops(item);
1886                if (!item->ri_ops) {
1887                        xfs_warn(log->l_mp,
1888                                "%s: unrecognized type of log operation (%d)",
1889                                __func__, ITEM_TYPE(item));
1890                        ASSERT(0);
1891                        /*
1892                         * return the remaining items back to the transaction
1893                         * item list so they can be freed by the caller.
1894                         */
1895                        if (!list_empty(&sort_list))
1896                                list_splice_init(&sort_list, &trans->r_itemq);
1897                        error = -EFSCORRUPTED;
1898                        break;
1899                }
1900
1901                if (item->ri_ops->reorder)
1902                        fate = item->ri_ops->reorder(item);
1903
1904                switch (fate) {
1905                case XLOG_REORDER_BUFFER_LIST:
1906                        list_move_tail(&item->ri_list, &buffer_list);
1907                        break;
1908                case XLOG_REORDER_CANCEL_LIST:
1909                        trace_xfs_log_recover_item_reorder_head(log,
1910                                        trans, item, pass);
1911                        list_move(&item->ri_list, &cancel_list);
1912                        break;
1913                case XLOG_REORDER_INODE_BUFFER_LIST:
1914                        list_move(&item->ri_list, &inode_buffer_list);
1915                        break;
1916                case XLOG_REORDER_ITEM_LIST:
1917                        trace_xfs_log_recover_item_reorder_tail(log,
1918                                                        trans, item, pass);
1919                        list_move_tail(&item->ri_list, &item_list);
1920                        break;
1921                }
1922        }
1923
1924        ASSERT(list_empty(&sort_list));
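            /*
             * Reassemble r_itemq in replay order: plain buffers at the head,
             * then non-buffer items, then inode unlink buffers, with
             * cancelled buffers last.
             */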
1925        if (!list_empty(&buffer_list))
1926                list_splice(&buffer_list, &trans->r_itemq);
1927        if (!list_empty(&item_list))
1928                list_splice_tail(&item_list, &trans->r_itemq);
1929        if (!list_empty(&inode_buffer_list))
1930                list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1931        if (!list_empty(&cancel_list))
1932                list_splice_tail(&cancel_list, &trans->r_itemq);
1933        return error;
1934}
1935
1936void
1937xlog_buf_readahead(
1938        struct xlog             *log,
1939        xfs_daddr_t             blkno,
1940        uint                    len,
1941        const struct xfs_buf_ops *ops)
1942{
1943        if (!xlog_is_buffer_cancelled(log, blkno, len))
1944                xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1945}
1946
1947STATIC int
1948xlog_recover_items_pass2(
1949        struct xlog                     *log,
1950        struct xlog_recover             *trans,
1951        struct list_head                *buffer_list,
1952        struct list_head                *item_list)
1953{
1954        struct xlog_recover_item        *item;
1955        int                             error = 0;
1956
1957        list_for_each_entry(item, item_list, ri_list) {
1958                trace_xfs_log_recover_item_recover(log, trans, item,
1959                                XLOG_RECOVER_PASS2);
1960
1961                if (item->ri_ops->commit_pass2)
1962                        error = item->ri_ops->commit_pass2(log, buffer_list,
1963                                        item, trans->r_lsn);
1964                if (error)
1965                        return error;
1966        }
1967
1968        return error;
1969}
1970
1971/*
1972 * Perform the transaction.
1973 *
1974 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
1975 * EFIs and EFDs get queued up by adding entries into the AIL for them.
1976 */
1977STATIC int
1978xlog_recover_commit_trans(
1979        struct xlog             *log,
1980        struct xlog_recover     *trans,
1981        int                     pass,
1982        struct list_head        *buffer_list)
1983{
1984        int                             error = 0;
1985        int                             items_queued = 0;
1986        struct xlog_recover_item        *item;
1987        struct xlog_recover_item        *next;
1988        LIST_HEAD                       (ra_list);
1989        LIST_HEAD                       (done_list);
1990
1991        #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
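            /*
             * Pass 2 batches the work below: readahead is issued for up to
             * XLOG_RECOVER_COMMIT_QUEUE_MAX items before those items are
             * recovered, so buffer reads overlap with replay instead of
             * being one synchronous read per item.
             */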
1992
1993        hlist_del_init(&trans->r_list);
1994
1995        error = xlog_recover_reorder_trans(log, trans, pass);
1996        if (error)
1997                return error;
1998
1999        list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2000                trace_xfs_log_recover_item_recover(log, trans, item, pass);
2001
2002                switch (pass) {
2003                case XLOG_RECOVER_PASS1:
2004                        if (item->ri_ops->commit_pass1)
2005                                error = item->ri_ops->commit_pass1(log, item);
2006                        break;
2007                case XLOG_RECOVER_PASS2:
2008                        if (item->ri_ops->ra_pass2)
2009                                item->ri_ops->ra_pass2(log, item);
2010                        list_move_tail(&item->ri_list, &ra_list);
2011                        items_queued++;
2012                        if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2013                                error = xlog_recover_items_pass2(log, trans,
2014                                                buffer_list, &ra_list);
2015                                list_splice_tail_init(&ra_list, &done_list);
2016                                items_queued = 0;
2017                        }
2018
2019                        break;
2020                default:
2021                        ASSERT(0);
2022                }
2023
2024                if (error)
2025                        goto out;
2026        }
2027
2028out:
2029        if (!list_empty(&ra_list)) {
2030                if (!error)
2031                        error = xlog_recover_items_pass2(log, trans,
2032                                        buffer_list, &ra_list);
2033                list_splice_tail_init(&ra_list, &done_list);
2034        }
2035
2036        if (!list_empty(&done_list))
2037                list_splice_init(&done_list, &trans->r_itemq);
2038
2039        return error;
2040}
2041
2042STATIC void
2043xlog_recover_add_item(
2044        struct list_head        *head)
2045{
2046        struct xlog_recover_item *item;
2047
2048        item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2049        INIT_LIST_HEAD(&item->ri_list);
2050        list_add_tail(&item->ri_list, head);
2051}
2052
2053STATIC int
2054xlog_recover_add_to_cont_trans(
2055        struct xlog             *log,
2056        struct xlog_recover     *trans,
2057        char                    *dp,
2058        int                     len)
2059{
2060        struct xlog_recover_item *item;
2061        char                    *ptr, *old_ptr;
2062        int                     old_len;
2063
2064        /*
2065         * If the transaction is empty, the header was split across this and the
2066         * previous record. Copy the rest of the header.
2067         */
2068        if (list_empty(&trans->r_itemq)) {
2069                ASSERT(len <= sizeof(struct xfs_trans_header));
2070                if (len > sizeof(struct xfs_trans_header)) {
2071                        xfs_warn(log->l_mp, "%s: bad header length", __func__);
2072                        return -EFSCORRUPTED;
2073                }
2074
2075                xlog_recover_add_item(&trans->r_itemq);
2076                ptr = (char *)&trans->r_theader +
2077                                sizeof(struct xfs_trans_header) - len;
2078                memcpy(ptr, dp, len);
2079                return 0;
2080        }
2081
2082        /* take the tail entry */
2083        item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2084                          ri_list);
2085
2086        old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2087        old_len = item->ri_buf[item->ri_cnt-1].i_len;
2088
2089        ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
2090        if (!ptr)
2091                return -ENOMEM;
2092        memcpy(&ptr[old_len], dp, len);
2093        item->ri_buf[item->ri_cnt-1].i_len += len;
2094        item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2095        trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2096        return 0;
2097}
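    /*
     * For example (sizes illustrative only): a 200 byte region straddling a
     * record boundary arrives as 120 bytes added by
     * xlog_recover_add_to_trans() plus an 80 byte WAS_CONT_TRANS fragment
     * that the function above appends to the same ri_buf entry.
     */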
2098
2099/*
2100 * The next region to add is the start of a new region.  It could be
2101 * a whole region or it could be the first part of a new region.  Because
2102 * of this, the assumption here is that the type and size fields of all
2103 * format structures fit into the first 32 bits of the structure.
2104 *
2105 * This works because all regions must be 32 bit aligned.  Therefore, we
2106 * either have both fields or we have neither field.  In the case we have
2107 * neither field, the data part of the region is zero length.  We only have
2108 * a log_op_header and can throw away the header since a new one will appear
2109 * later.  If we have at least 4 bytes, then we can determine how many regions
2110 * will appear in the current log item.
2111 */
2112STATIC int
2113xlog_recover_add_to_trans(
2114        struct xlog             *log,
2115        struct xlog_recover     *trans,
2116        char                    *dp,
2117        int                     len)
2118{
2119        struct xfs_inode_log_format     *in_f;                  /* any will do */
2120        struct xlog_recover_item *item;
2121        char                    *ptr;
2122
2123        if (!len)
2124                return 0;
2125        if (list_empty(&trans->r_itemq)) {
2126                /* we need to catch log corruptions here */
2127                if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2128                        xfs_warn(log->l_mp, "%s: bad header magic number",
2129                                __func__);
2130                        ASSERT(0);
2131                        return -EFSCORRUPTED;
2132                }
2133
2134                if (len > sizeof(struct xfs_trans_header)) {
2135                        xfs_warn(log->l_mp, "%s: bad header length", __func__);
2136                        ASSERT(0);
2137                        return -EFSCORRUPTED;
2138                }
2139
2140                /*
2141                 * The transaction header can be arbitrarily split across op
2142                 * records. If we don't have the whole thing here, copy what we
2143                 * do have and handle the rest in the next record.
2144                 */
2145                if (len == sizeof(struct xfs_trans_header))
2146                        xlog_recover_add_item(&trans->r_itemq);
2147                memcpy(&trans->r_theader, dp, len);
2148                return 0;
2149        }
2150
2151        ptr = kmem_alloc(len, 0);
2152        memcpy(ptr, dp, len);
2153        in_f = (struct xfs_inode_log_format *)ptr;
2154
2155        /* take the tail entry */
2156        item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2157                          ri_list);
2158        if (item->ri_total != 0 &&
2159             item->ri_total == item->ri_cnt) {
2160                /* tail item is in use, get a new one */
2161                xlog_recover_add_item(&trans->r_itemq);
2162                item = list_entry(trans->r_itemq.prev,
2163                                        struct xlog_recover_item, ri_list);
2164        }
2165
2166        if (item->ri_total == 0) {              /* first region to be added */
2167                if (in_f->ilf_size == 0 ||
2168                    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2169                        xfs_warn(log->l_mp,
2170                "bad number of regions (%d) in inode log format",
2171                                  in_f->ilf_size);
2172                        ASSERT(0);
2173                        kmem_free(ptr);
2174                        return -EFSCORRUPTED;
2175                }
2176
2177                item->ri_total = in_f->ilf_size;
2178                item->ri_buf =
2179                        kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2180                                    0);
2181        }
2182
2183        if (item->ri_total <= item->ri_cnt) {
2184                xfs_warn(log->l_mp,
2185        "log item region count (%d) overflowed size (%d)",
2186                                item->ri_cnt, item->ri_total);
2187                ASSERT(0);
2188                kmem_free(ptr);
2189                return -EFSCORRUPTED;
2190        }
2191
2192        /* Description region is ri_buf[0] */
2193        item->ri_buf[item->ri_cnt].i_addr = ptr;
2194        item->ri_buf[item->ri_cnt].i_len  = len;
2195        item->ri_cnt++;
2196        trace_xfs_log_recover_item_add(log, trans, item, 0);
2197        return 0;
2198}
2199
2200/*
2201 * Free up any resources allocated by the transaction
2202 *
2203 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2204 */
2205STATIC void
2206xlog_recover_free_trans(
2207        struct xlog_recover     *trans)
2208{
2209        struct xlog_recover_item *item, *n;
2210        int                     i;
2211
2212        hlist_del_init(&trans->r_list);
2213
2214        list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2215                /* Free the regions in the item. */
2216                list_del(&item->ri_list);
2217                for (i = 0; i < item->ri_cnt; i++)
2218                        kmem_free(item->ri_buf[i].i_addr);
2219                /* Free the item itself */
2220                kmem_free(item->ri_buf);
2221                kmem_free(item);
2222        }
2223        /* Free the transaction recover structure */
2224        kmem_free(trans);
2225}
2226
2227/*
2228 * On error or completion, trans is freed.
2229 */
2230STATIC int
2231xlog_recovery_process_trans(
2232        struct xlog             *log,
2233        struct xlog_recover     *trans,
2234        char                    *dp,
2235        unsigned int            len,
2236        unsigned int            flags,
2237        int                     pass,
2238        struct list_head        *buffer_list)
2239{
2240        int                     error = 0;
2241        bool                    freeit = false;
2242
2243        /* mask off ophdr transaction container flags */
2244        flags &= ~XLOG_END_TRANS;
2245        if (flags & XLOG_WAS_CONT_TRANS)
2246                flags &= ~XLOG_CONTINUE_TRANS;
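            /*
             * E.g. an ophdr carrying WAS_CONT_TRANS|CONTINUE_TRANS (a
             * continuation that itself continues into the next record) is
             * reduced to WAS_CONT_TRANS and handled by that case below.
             */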
2247
2248        /*
2249         * Callees must not free the trans structure. We'll decide if we need to
2250         * free it or not based on the operation being done and its result.
2251         */
2252        switch (flags) {
2253        /* expected flag values */
2254        case 0:
2255        case XLOG_CONTINUE_TRANS:
2256                error = xlog_recover_add_to_trans(log, trans, dp, len);
2257                break;
2258        case XLOG_WAS_CONT_TRANS:
2259                error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2260                break;
2261        case XLOG_COMMIT_TRANS:
2262                error = xlog_recover_commit_trans(log, trans, pass,
2263                                                  buffer_list);
2264                /* success or fail, we are now done with this transaction. */
2265                freeit = true;
2266                break;
2267
2268        /* unexpected flag values */
2269        case XLOG_UNMOUNT_TRANS:
2270                /* just skip trans */
2271                xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2272                freeit = true;
2273                break;
2274        case XLOG_START_TRANS:
2275        default:
2276                xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2277                ASSERT(0);
2278                error = -EFSCORRUPTED;
2279                break;
2280        }
2281        if (error || freeit)
2282                xlog_recover_free_trans(trans);
2283        return error;
2284}
2285
2286/*
2287 * Lookup the transaction recovery structure associated with the ID in the
2288 * current ophdr. If the transaction doesn't exist and the start flag is set in
2289 * the ophdr, then allocate a new transaction for future ID matches to find.
2290 * Either way, return what we found during the lookup - an existing transaction
2291 * or nothing.
2292 */
2293STATIC struct xlog_recover *
2294xlog_recover_ophdr_to_trans(
2295        struct hlist_head       rhash[],
2296        struct xlog_rec_header  *rhead,
2297        struct xlog_op_header   *ohead)
2298{
2299        struct xlog_recover     *trans;
2300        xlog_tid_t              tid;
2301        struct hlist_head       *rhp;
2302
2303        tid = be32_to_cpu(ohead->oh_tid);
2304        rhp = &rhash[XLOG_RHASH(tid)];
2305        hlist_for_each_entry(trans, rhp, r_list) {
2306                if (trans->r_log_tid == tid)
2307                        return trans;
2308        }
2309
2310        /*
2311         * skip over non-start transaction headers - we could be
2312         * processing slack space before the next transaction starts
2313         */
2314        if (!(ohead->oh_flags & XLOG_START_TRANS))
2315                return NULL;
2316
2317        ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2318
2319        /*
2320         * This is a new transaction so allocate a new recovery container to
2321         * hold the recovery ops that will follow.
2322         */
2323        trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2324        trans->r_log_tid = tid;
2325        trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2326        INIT_LIST_HEAD(&trans->r_itemq);
2327        INIT_HLIST_NODE(&trans->r_list);
2328        hlist_add_head(&trans->r_list, rhp);
2329
2330        /*
2331         * Nothing more to do for this ophdr. Items to be added to this new
2332         * transaction will be in subsequent ophdr containers.
2333         */
2334        return NULL;
2335}
2336
2337STATIC int
2338xlog_recover_process_ophdr(
2339        struct xlog             *log,
2340        struct hlist_head       rhash[],
2341        struct xlog_rec_header  *rhead,
2342        struct xlog_op_header   *ohead,
2343        char                    *dp,
2344        char                    *end,
2345        int                     pass,
2346        struct list_head        *buffer_list)
2347{
2348        struct xlog_recover     *trans;
2349        unsigned int            len;
2350        int                     error;
2351
2352        /* Do we understand who wrote this op? */
2353        if (ohead->oh_clientid != XFS_TRANSACTION &&
2354            ohead->oh_clientid != XFS_LOG) {
2355                xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2356                        __func__, ohead->oh_clientid);
2357                ASSERT(0);
2358                return -EFSCORRUPTED;
2359        }
2360
2361        /*
2362         * Check the ophdr contains all the data it is supposed to contain.
2363         */
2364        len = be32_to_cpu(ohead->oh_len);
2365        if (dp + len > end) {
2366                xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2367                WARN_ON(1);
2368                return -EFSCORRUPTED;
2369        }
2370
2371        trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2372        if (!trans) {
2373                /* nothing to do, so skip over this ophdr */
2374                return 0;
2375        }
2376
2377        /*
2378         * The recovered buffer queue is drained only once we know that all
2379         * recovery items for the current LSN have been processed. This is
2380         * required because:
2381         *
2382         * - Buffer write submission updates the metadata LSN of the buffer.
2383         * - Log recovery skips items with a metadata LSN >= the current LSN of
2384         *   the recovery item.
2385         * - Separate recovery items against the same metadata buffer can share
2386         *   a current LSN. I.e., consider that the LSN of a recovery item is
2387         *   defined as the starting LSN of the first record in which its
2388         *   transaction appears, that a record can hold multiple transactions,
2389         *   and/or that a transaction can span multiple records.
2390         *
2391         * In other words, we are allowed to submit a buffer from log recovery
2392         * once per current LSN. Otherwise, we may incorrectly skip recovery
2393         * items and cause corruption.
2394         *
2395         * We don't know up front whether buffers are updated multiple times per
2396         * LSN. Therefore, track the current LSN of each commit log record as it
2397         * is processed and drain the queue when it changes. Use commit records
2398         * because they are ordered correctly by the logging code.
2399         */
2400        if (log->l_recovery_lsn != trans->r_lsn &&
2401            ohead->oh_flags & XLOG_COMMIT_TRANS) {
2402                error = xfs_buf_delwri_submit(buffer_list);
2403                if (error)
2404                        return error;
2405                log->l_recovery_lsn = trans->r_lsn;
2406        }
2407
2408        return xlog_recovery_process_trans(log, trans, dp, len,
2409                                           ohead->oh_flags, pass, buffer_list);
2410}
2411
2412/*
2413 * There are two valid states of the r_state field.  0 indicates that the
2414 * transaction structure is in a normal state.  We have either seen the
2415 * start of the transaction or the last operation we added was not a partial
2416 * operation.  If the last operation we added to the transaction was a
2417 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2418 *
2419 * NOTE: skip LRs with 0 data length.
2420 */
2421STATIC int
2422xlog_recover_process_data(
2423        struct xlog             *log,
2424        struct hlist_head       rhash[],
2425        struct xlog_rec_header  *rhead,
2426        char                    *dp,
2427        int                     pass,
2428        struct list_head        *buffer_list)
2429{
2430        struct xlog_op_header   *ohead;
2431        char                    *end;
2432        int                     num_logops;
2433        int                     error;
2434
2435        end = dp + be32_to_cpu(rhead->h_len);
2436        num_logops = be32_to_cpu(rhead->h_num_logops);
2437
2438        /* check the log format matches our own - else we can't recover */
2439        if (xlog_header_check_recover(log->l_mp, rhead))
2440                return -EIO;
2441
2442        trace_xfs_log_recover_record(log, rhead, pass);
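            /*
             * The record body is a packed sequence of op headers, each
             * followed by oh_len bytes of payload:
             *
             *     | ophdr | payload | ophdr | payload | ...
             *
             * so the walk below steps over the header and then the payload
             * for each of num_logops operations.
             */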
2443        while ((dp < end) && num_logops) {
2444
2445                ohead = (struct xlog_op_header *)dp;
2446                dp += sizeof(*ohead);
2447                ASSERT(dp <= end);
2448
2449                /* errors will abort recovery */
2450                error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2451                                                   dp, end, pass, buffer_list);
2452                if (error)
2453                        return error;
2454
2455                dp += be32_to_cpu(ohead->oh_len);
2456                num_logops--;
2457        }
2458        return 0;
2459}
2460
2461/* Take all the collected deferred ops and finish them in order. */
2462static int
2463xlog_finish_defer_ops(
2464        struct xfs_mount        *mp,
2465        struct list_head        *capture_list)
2466{
2467        struct xfs_defer_capture *dfc, *next;
2468        struct xfs_trans        *tp;
2469        struct xfs_inode        *ip;
2470        int                     error = 0;
2471
2472        list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2473                struct xfs_trans_res    resv;
2474
2475                /*
2476                 * Create a new transaction reservation from the captured
2477                 * information.  Set logcount to 1 to force the new transaction
2478                 * to regrant every roll so that we can make forward progress
2479                 * in recovery no matter how full the log might be.
2480                 */
2481                resv.tr_logres = dfc->dfc_logres;
2482                resv.tr_logcount = 1;
2483                resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2484
2485                error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2486                                dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2487                if (error) {
2488                        xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
2489                        return error;
2490                }
2491
2492                /*
2493                 * Transfer to this new transaction all the dfops we captured
2494                 * from recovering a single intent item.
2495                 */
2496                list_del_init(&dfc->dfc_list);
2497                xfs_defer_ops_continue(dfc, tp, &ip);
2498
2499                error = xfs_trans_commit(tp);
2500                if (ip) {
2501                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
2502                        xfs_irele(ip);
2503                }
2504                if (error)
2505                        return error;
2506        }
2507
2508        ASSERT(list_empty(capture_list));
2509        return 0;
2510}
2511
2512/* Release all the captured defer ops and capture structures in this list. */
2513static void
2514xlog_abort_defer_ops(
2515        struct xfs_mount                *mp,
2516        struct list_head                *capture_list)
2517{
2518        struct xfs_defer_capture        *dfc;
2519        struct xfs_defer_capture        *next;
2520
2521        list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2522                list_del_init(&dfc->dfc_list);
2523                xfs_defer_ops_release(mp, dfc);
2524        }
2525}
2526/*
2527 * When this is called, all of the log intent items which did not have
2528 * corresponding log done items should be in the AIL.  What we do now
2529 * is update the data structures associated with each one.
2530 *
2531 * Since we process the log intent items in normal transactions, they
2532 * will be removed at some point after the commit.  This prevents us
2533 * from just walking down the list processing each one.  We'll use a
2534 * flag in the intent item to skip those that we've already processed
2535 * and use the AIL iteration mechanism's generation count to try to
2536 * speed this up at least a bit.
2537 *
2538 * When we start, we know that the intents are the only things in the
2539 * AIL.  As we process them, however, other items are added to the
2540 * AIL.
2541 */
2542STATIC int
2543xlog_recover_process_intents(
2544        struct xlog             *log)
2545{
2546        LIST_HEAD(capture_list);
2547        struct xfs_ail_cursor   cur;
2548        struct xfs_log_item     *lip;
2549        struct xfs_ail          *ailp;
2550        int                     error = 0;
2551#if defined(DEBUG) || defined(XFS_WARN)
2552        xfs_lsn_t               last_lsn;
2553#endif
2554
2555        ailp = log->l_ailp;
2556        spin_lock(&ailp->ail_lock);
2557#if defined(DEBUG) || defined(XFS_WARN)
2558        last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2559#endif
2560        for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2561             lip != NULL;
2562             lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
2563                /*
2564                 * We're done when we see something other than an intent.
2565                 * There should be no intents left in the AIL now.
2566                 */
2567                if (!xlog_item_is_intent(lip)) {
2568#ifdef DEBUG
2569                        for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2570                                ASSERT(!xlog_item_is_intent(lip));
2571#endif
2572                        break;
2573                }
2574
2575                /*
2576                 * We should never see a redo item with a LSN higher than
2577                 * the last transaction we found in the log at the start
2578                 * of recovery.
2579                 */
2580                ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2581
2582                /*
2583                 * NOTE: If your intent processing routine can create more
2584                 * deferred ops, you /must/ attach them to the capture list in
2585                 * the recover routine or else those subsequent intents will be
2586                 * replayed in the wrong order!
2587                 */
2588                spin_unlock(&ailp->ail_lock);
2589                error = lip->li_ops->iop_recover(lip, &capture_list);
2590                spin_lock(&ailp->ail_lock);
2591                if (error) {
2592                        trace_xlog_intent_recovery_failed(log->l_mp, error,
2593                                        lip->li_ops->iop_recover);
2594                        break;
2595                }
2596        }
2597
2598        xfs_trans_ail_cursor_done(&cur);
2599        spin_unlock(&ailp->ail_lock);
2600        if (error)
2601                goto err;
2602
2603        error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2604        if (error)
2605                goto err;
2606
2607        return 0;
2608err:
2609        xlog_abort_defer_ops(log->l_mp, &capture_list);
2610        return error;
2611}
2612
2613/*
2614 * A cancel occurs when the mount has failed and we're bailing out.
2615 * Release all pending log intent items so they don't pin the AIL.
2616 */
2617STATIC void
2618xlog_recover_cancel_intents(
2619        struct xlog             *log)
2620{
2621        struct xfs_log_item     *lip;
2622        struct xfs_ail_cursor   cur;
2623        struct xfs_ail          *ailp;
2624
2625        ailp = log->l_ailp;
2626        spin_lock(&ailp->ail_lock);
2627        lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2628        while (lip != NULL) {
2629                /*
2630                 * We're done when we see something other than an intent.
2631                 * There should be no intents left in the AIL now.
2632                 */
2633                if (!xlog_item_is_intent(lip)) {
2634#ifdef DEBUG
2635                        for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2636                                ASSERT(!xlog_item_is_intent(lip));
2637#endif
2638                        break;
2639                }
2640
2641                spin_unlock(&ailp->ail_lock);
2642                lip->li_ops->iop_release(lip);
2643                spin_lock(&ailp->ail_lock);
2644                lip = xfs_trans_ail_cursor_next(ailp, &cur);
2645        }
2646
2647        xfs_trans_ail_cursor_done(&cur);
2648        spin_unlock(&ailp->ail_lock);
2649}
2650
2651/*
2652 * This routine performs a transaction to null out a bad inode pointer
2653 * in an agi unlinked inode hash bucket.
2654 */
2655STATIC void
2656xlog_recover_clear_agi_bucket(
2657        xfs_mount_t     *mp,
2658        xfs_agnumber_t  agno,
2659        int             bucket)
2660{
2661        xfs_trans_t     *tp;
2662        xfs_agi_t       *agi;
2663        struct xfs_buf  *agibp;
2664        int             offset;
2665        int             error;
2666
2667        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2668        if (error)
2669                goto out_error;
2670
2671        error = xfs_read_agi(mp, tp, agno, &agibp);
2672        if (error)
2673                goto out_abort;
2674
2675        agi = agibp->b_addr;
2676        agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2677        offset = offsetof(xfs_agi_t, agi_unlinked) +
2678                 (sizeof(xfs_agino_t) * bucket);
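            /*
             * Log only the single xfs_agino_t slot for this bucket: the
             * first and last byte offsets passed below bound exactly that
             * entry within the AGI buffer.
             */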
2679        xfs_trans_log_buf(tp, agibp, offset,
2680                          (offset + sizeof(xfs_agino_t) - 1));
2681
2682        error = xfs_trans_commit(tp);
2683        if (error)
2684                goto out_error;
2685        return;
2686
2687out_abort:
2688        xfs_trans_cancel(tp);
2689out_error:
2690        xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
2691        return;
2692}
2693
2694STATIC xfs_agino_t
2695xlog_recover_process_one_iunlink(
2696        struct xfs_mount                *mp,
2697        xfs_agnumber_t                  agno,
2698        xfs_agino_t                     agino,
2699        int                             bucket)
2700{
2701        struct xfs_buf                  *ibp;
2702        struct xfs_dinode               *dip;
2703        struct xfs_inode                *ip;
2704        xfs_ino_t                       ino;
2705        int                             error;
2706
2707        ino = XFS_AGINO_TO_INO(mp, agno, agino);
2708        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
2709        if (error)
2710                goto fail;
2711
2712        /*
2713         * Get the on-disk inode to find the next inode in the bucket.
2714         */
2715        error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &ibp);
2716        if (error)
2717                goto fail_iput;
2718        dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2719
2720        xfs_iflags_clear(ip, XFS_IRECOVERY);
2721        ASSERT(VFS_I(ip)->i_nlink == 0);
2722        ASSERT(VFS_I(ip)->i_mode != 0);
2723
2724        /* setup for the next pass */
2725        agino = be32_to_cpu(dip->di_next_unlinked);
2726        xfs_buf_relse(ibp);
2727
2728        xfs_irele(ip);
2729        return agino;
2730
2731 fail_iput:
2732        xfs_irele(ip);
2733 fail:
2734        /*
2735         * We can't read in the inode this bucket points to, or this inode
2736         * is messed up.  Just ditch this bucket of inodes.  We will lose
2737         * some inodes and space, but at least we won't hang.
2738         *
2739         * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2740         * clear the inode pointer in the bucket.
2741         */
2742        xlog_recover_clear_agi_bucket(mp, agno, bucket);
2743        return NULLAGINO;
2744}
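
/*
 * For reference, a hedged sketch of how the unlinked lists processed below
 * are keyed: each AGI carries XFS_AGI_UNLINKED_BUCKETS (64) list heads, an
 * unlinked inode is hashed to a bucket by its AG inode number, and each
 * on-disk inode chains to the next through di_next_unlinked, terminated by
 * NULLAGINO.  The example_ name is illustrative, not the helper the unlink
 * path actually uses.
 */
static inline int
example_iunlink_bucket(
	xfs_agino_t	agino)
{
	return agino % XFS_AGI_UNLINKED_BUCKETS;
}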
2745
2746/*
2747 * Recover AGI unlinked lists
2748 *
2749 * This is called during recovery to process any inodes which we unlinked but
2750 * had not yet freed when the system crashed.  These inodes will be on the lists in the
2751 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2752 * any inodes found on the lists. Each inode is removed from the lists when it
2753 * has been fully truncated and is freed. The freeing of the inode and its
2754 * removal from the list must be atomic.
2755 *
2756 * If everything we touch in the agi processing loop is already in memory, this
2757 * loop can hold the cpu for a long time. It runs without lock contention,
2758 * memory allocation contention, the need to wait for IO, etc, and so will run
2759 * until we either run out of inodes to process, run low on memory, or run out
2760 * of log space.
2761 *
2762 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2763 * and can prevent other filesystem work (such as CIL pushes) from running. This
2764 * can lead to deadlocks if the recovery process runs out of log reservation
2765 * space. Hence we need to yield the CPU when there is other kernel work
2766 * scheduled on this CPU to ensure other scheduled work can run without undue
2767 * latency.
2768 */
2769STATIC void
2770xlog_recover_process_iunlinks(
2771        struct xlog     *log)
2772{
2773        struct xfs_mount        *mp = log->l_mp;
2774        struct xfs_perag        *pag;
2775        xfs_agnumber_t          agno;
2776        struct xfs_agi          *agi;
2777        struct xfs_buf          *agibp;
2778        xfs_agino_t             agino;
2779        int                     bucket;
2780        int                     error;
2781
2782        for_each_perag(mp, agno, pag) {
2783                error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
2784                if (error) {
2785                        /*
2786                         * AGI is b0rked. Don't process it.
2787                         *
2788                         * We should probably mark the filesystem as corrupt
2789                         * after we've recovered all the AGs we can....
2790                         */
2791                        continue;
2792                }
2793                /*
2794                 * Unlock the buffer so that it can be acquired in the normal
2795                 * course of the transaction to truncate and free each inode.
2796                 * Because we are not racing with anyone else here for the AGI
2797                 * buffer, we don't even need to hold it locked to read the
2798                 * initial unlinked bucket entries out of the buffer. We keep a
2799                 * buffer reference, though, so that it stays pinned in memory
2800                 * while we need the buffer.
2801                 */
2802                agi = agibp->b_addr;
2803                xfs_buf_unlock(agibp);
2804
2805                for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2806                        agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2807                        while (agino != NULLAGINO) {
2808                                agino = xlog_recover_process_one_iunlink(mp,
2809                                                pag->pag_agno, agino, bucket);
2810                                cond_resched();
2811                        }
2812                }
2813                xfs_buf_rele(agibp);
2814        }
2815
2816        /*
2817         * Flush the pending unlinked inodes to ensure that the inactivations
2818         * are fully completed on disk and the incore inodes can be reclaimed
2819         * before we signal that recovery is complete.
2820         */
2821        xfs_inodegc_flush(mp);
2822}
2823
2824STATIC void
2825xlog_unpack_data(
2826        struct xlog_rec_header  *rhead,
2827        char                    *dp,
2828        struct xlog             *log)
2829{
2830        int                     i, j, k;
2831
2832        for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2833                  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2834                *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2835                dp += BBSIZE;
2836        }
2837
2838        if (xfs_has_logv2(log->l_mp)) {
2839                xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2840                for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2841                        j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2842                        k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2843                        *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2844                        dp += BBSIZE;
2845                }
2846        }
2847}
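
/*
 * For context: xlog_unpack_data() is the inverse of the packing done at
 * write time, where the first __be32 of every basic block in the record is
 * saved into the header's h_cycle_data[] and overwritten with the record's
 * cycle number, so torn writes show up as a cycle mismatch.  A simplified,
 * hedged sketch of that forward step (v1 headers only; the v2 extended
 * headers handled above are omitted, and the example_ name is illustrative):
 */
static inline void
example_pack_cycle_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			nbblks,
	__be32			cycle_lsn)
{
	int			i;

	for (i = 0; i < nbblks && i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		/* stash the real first word, then stamp the cycle number */
		rhead->h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}
}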
2848
2849/*
2850 * CRC check, unpack and process a log record.
2851 */
2852STATIC int
2853xlog_recover_process(
2854        struct xlog             *log,
2855        struct hlist_head       rhash[],
2856        struct xlog_rec_header  *rhead,
2857        char                    *dp,
2858        int                     pass,
2859        struct list_head        *buffer_list)
2860{
2861        __le32                  old_crc = rhead->h_crc;
2862        __le32                  crc;
2863
2864        crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2865
2866        /*
2867         * Nothing else to do if this is a CRC verification pass. Just return
2868         * if this is a record with a non-zero crc. Unfortunately, mkfs always
2869         * sets old_crc to 0 so we must consider this valid even on v5 supers.
2870         * Otherwise, return EFSBADCRC on failure so the callers up the stack
2871         * know precisely what failed.
2872         */
2873        if (pass == XLOG_RECOVER_CRCPASS) {
2874                if (old_crc && crc != old_crc)
2875                        return -EFSBADCRC;
2876                return 0;
2877        }
2878
2879        /*
2880         * We're in the normal recovery path. Warn if the CRC in the header is
2881         * non-zero or the filesystem has CRCs enabled. The warning is advisory;
2882         * the zero-CRC check prevents warnings from being emitted when
2883         * upgrading the kernel from one that did not add CRCs by default.
2884         */
2885        if (crc != old_crc) {
2886                if (old_crc || xfs_has_crc(log->l_mp)) {
2887                        xfs_alert(log->l_mp,
2888                "log record CRC mismatch: found 0x%x, expected 0x%x.",
2889                                        le32_to_cpu(old_crc),
2890                                        le32_to_cpu(crc));
2891                        xfs_hex_dump(dp, 32);
2892                }
2893
2894                /*
2895                 * If the filesystem is CRC enabled, this mismatch becomes a
2896                 * fatal log corruption failure.
2897                 */
2898                if (xfs_has_crc(log->l_mp)) {
2899                        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2900                        return -EFSCORRUPTED;
2901                }
2902        }
2903
2904        xlog_unpack_data(rhead, dp, log);
2905
2906        return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2907                                         buffer_list);
2908}
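
/*
 * For context: the record CRC verified above is layered.  It covers the
 * record header with the h_crc field itself excluded, then (for v2 logs)
 * the extended header cycle data, then the record payload.  A hedged
 * sketch of the v1 portion, assuming the usual xfs checksum helpers; the
 * canonical implementation is xlog_cksum() in xfs_log.c:
 */
static inline __le32
example_rec_cksum(
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* header first, with the CRC field excluded from the calculation */
	crc = xfs_start_cksum_update((char *)rhead,
			sizeof(struct xlog_rec_header),
			offsetof(struct xlog_rec_header, h_crc));

	/* ... then the record payload */
	crc = crc32c(crc, dp, size);
	return xfs_end_cksum(crc);
}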
2909
2910STATIC int
2911xlog_valid_rec_header(
2912        struct xlog             *log,
2913        struct xlog_rec_header  *rhead,
2914        xfs_daddr_t             blkno,
2915        int                     bufsize)
2916{
2917        int                     hlen;
2918
2919        if (XFS_IS_CORRUPT(log->l_mp,
2920                           rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2921                return -EFSCORRUPTED;
2922        if (XFS_IS_CORRUPT(log->l_mp,
2923                           (!rhead->h_version ||
2924                           (be32_to_cpu(rhead->h_version) &
2925                            (~XLOG_VERSION_OKBITS))))) {
2926                xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2927                        __func__, be32_to_cpu(rhead->h_version));
2928                return -EFSCORRUPTED;
2929        }
2930
2931        /*
2932         * LR body must have data (or it wouldn't have been written)
2933         * and h_len must not be greater than LR buffer size.
2934         */
2935        hlen = be32_to_cpu(rhead->h_len);
2936        if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2937                return -EFSCORRUPTED;
2938
2939        if (XFS_IS_CORRUPT(log->l_mp,
2940                           blkno > log->l_logBBsize || blkno > INT_MAX))
2941                return -EFSCORRUPTED;
2942        return 0;
2943}
2944
2945/*
2946 * Read the log from tail to head and process the log records found.
2947 * Handle the two cases where the tail and head are in the same cycle
2948 * and where the active portion of the log wraps around the end of
2949 * the physical log separately.  The pass parameter is passed through
2950 * to the routines called to process the data and is not looked at
2951 * here.
2952 */
2953STATIC int
2954xlog_do_recovery_pass(
2955        struct xlog             *log,
2956        xfs_daddr_t             head_blk,
2957        xfs_daddr_t             tail_blk,
2958        int                     pass,
2959        xfs_daddr_t             *first_bad)     /* out: first bad log rec */
2960{
2961        xlog_rec_header_t       *rhead;
2962        xfs_daddr_t             blk_no, rblk_no;
2963        xfs_daddr_t             rhead_blk;
2964        char                    *offset;
2965        char                    *hbp, *dbp;
2966        int                     error = 0, h_size, h_len;
2967        int                     error2 = 0;
2968        int                     bblks, split_bblks;
2969        int                     hblks, split_hblks, wrapped_hblks;
2970        int                     i;
2971        struct hlist_head       rhash[XLOG_RHASH_SIZE];
2972        LIST_HEAD               (buffer_list);
2973
2974        ASSERT(head_blk != tail_blk);
2975        blk_no = rhead_blk = tail_blk;
2976
2977        for (i = 0; i < XLOG_RHASH_SIZE; i++)
2978                INIT_HLIST_HEAD(&rhash[i]);
2979
2980        /*
2981         * Read the header of the tail block and get the iclog buffer size from
2982         * h_size.  Use this to tell how many sectors make up the log header.
2983         */
2984        if (xfs_has_logv2(log->l_mp)) {
2985                /*
2986                 * When using variable length iclogs, read first sector of
2987                 * iclog header and extract the header size from it.  Get a
2988                 * new hbp that is the correct size.
2989                 */
2990                hbp = xlog_alloc_buffer(log, 1);
2991                if (!hbp)
2992                        return -ENOMEM;
2993
2994                error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2995                if (error)
2996                        goto bread_err1;
2997
2998                rhead = (xlog_rec_header_t *)offset;
2999
3000                /*
3001                 * xfsprogs has a bug where record length is based on lsunit but
3002                 * h_size (iclog size) is hardcoded to 32k. Now that we
3003                 * unconditionally CRC verify the unmount record, the
3004                 * log buffer can be too small for the record and cause an
3005                 * overrun.
3006                 *
3007                 * Detect this condition here. Use lsunit for the buffer size as
3008                 * long as this looks like the mkfs case. Otherwise, return an
3009                 * error to avoid a buffer overrun.
3010                 */
3011                h_size = be32_to_cpu(rhead->h_size);
3012                h_len = be32_to_cpu(rhead->h_len);
3013                if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3014                    rhead->h_num_logops == cpu_to_be32(1)) {
3015                        xfs_warn(log->l_mp,
3016                "invalid iclog size (%d bytes), using lsunit (%d bytes)",
3017                                 h_size, log->l_mp->m_logbsize);
3018                        h_size = log->l_mp->m_logbsize;
3019                }
3020
3021                error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3022                if (error)
3023                        goto bread_err1;
3024
3025                hblks = xlog_logrec_hblks(log, rhead);
3026                if (hblks != 1) {
3027                        kmem_free(hbp);
3028                        hbp = xlog_alloc_buffer(log, hblks);
3029                }
3030        } else {
3031                ASSERT(log->l_sectBBsize == 1);
3032                hblks = 1;
3033                hbp = xlog_alloc_buffer(log, 1);
3034                h_size = XLOG_BIG_RECORD_BSIZE;
3035        }
3036
3037        if (!hbp)
3038                return -ENOMEM;
3039        dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3040        if (!dbp) {
3041                kmem_free(hbp);
3042                return -ENOMEM;
3043        }
3044
3046        if (tail_blk > head_blk) {
3047                /*
3048                 * Perform recovery around the end of the physical log.
3049                 * When the head is not on the same cycle number as the tail,
3050                 * we can't do a sequential recovery.
3051                 */
3052                while (blk_no < log->l_logBBsize) {
3053                        /*
3054                         * Check for header wrapping around physical end-of-log
3055                         */
3056                        offset = hbp;
3057                        split_hblks = 0;
3058                        wrapped_hblks = 0;
3059                        if (blk_no + hblks <= log->l_logBBsize) {
3060                                /* Read header in one read */
3061                                error = xlog_bread(log, blk_no, hblks, hbp,
3062                                                   &offset);
3063                                if (error)
3064                                        goto bread_err2;
3065                        } else {
3066                                /* This LR is split across physical log end */
3067                                if (blk_no != log->l_logBBsize) {
3068                                        /* some data before physical log end */
3069                                        ASSERT(blk_no <= INT_MAX);
3070                                        split_hblks = log->l_logBBsize - (int)blk_no;
3071                                        ASSERT(split_hblks > 0);
3072                                        error = xlog_bread(log, blk_no,
3073                                                           split_hblks, hbp,
3074                                                           &offset);
3075                                        if (error)
3076                                                goto bread_err2;
3077                                }
3078
3079                                /*
3080                                 * Note: this black magic still works with
3081                                 * large sector sizes (non-512) only because:
3082                                 * - we increased the buffer size originally
3083                                 *   by 1 sector giving us enough extra space
3084                                 *   for the second read;
3085                                 * - the log start is guaranteed to be sector
3086                                 *   aligned;
3087                                 * - we read the log end (LR header start)
3088                                 *   _first_, then the log start (LR header end)
3089                                 *   - order is important.
3090                                 */
3091                                wrapped_hblks = hblks - split_hblks;
3092                                error = xlog_bread_noalign(log, 0,
3093                                                wrapped_hblks,
3094                                                offset + BBTOB(split_hblks));
3095                                if (error)
3096                                        goto bread_err2;
3097                        }
3098                        rhead = (xlog_rec_header_t *)offset;
3099                        error = xlog_valid_rec_header(log, rhead,
3100                                        split_hblks ? blk_no : 0, h_size);
3101                        if (error)
3102                                goto bread_err2;
3103
3104                        bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3105                        blk_no += hblks;
3106
3107                        /*
3108                         * Read the log record data in multiple reads if it
3109                         * wraps around the end of the log. Note that if the
3110                         * header already wrapped, blk_no could point past the
3111                         * end of the log. The record data is contiguous in
3112                         * that case.
3113                         */
3114                        if (blk_no + bblks <= log->l_logBBsize ||
3115                            blk_no >= log->l_logBBsize) {
3116                                rblk_no = xlog_wrap_logbno(log, blk_no);
3117                                error = xlog_bread(log, rblk_no, bblks, dbp,
3118                                                   &offset);
3119                                if (error)
3120                                        goto bread_err2;
3121                        } else {
3122                                /* This log record is split across the
3123                                 * physical end of log */
3124                                offset = dbp;
3125                                split_bblks = 0;
3126                                if (blk_no != log->l_logBBsize) {
3127                                        /* some data is before the physical
3128                                         * end of log */
3129                                        ASSERT(!wrapped_hblks);
3130                                        ASSERT(blk_no <= INT_MAX);
3131                                        split_bblks =
3132                                                log->l_logBBsize - (int)blk_no;
3133                                        ASSERT(split_bblks > 0);
3134                                        error = xlog_bread(log, blk_no,
3135                                                        split_bblks, dbp,
3136                                                        &offset);
3137                                        if (error)
3138                                                goto bread_err2;
3139                                }
3140
3141                                /*
3142                                 * Note: this black magic still works with
3143                                 * large sector sizes (non-512) only because:
3144                                 * - we increased the buffer size originally
3145                                 *   by 1 sector giving us enough extra space
3146                                 *   for the second read;
3147                                 * - the log start is guaranteed to be sector
3148                                 *   aligned;
3149                                 * - we read the log end (LR header start)
3150                                 *   _first_, then the log start (LR header end)
3151                                 *   - order is important.
3152                                 */
3153                                error = xlog_bread_noalign(log, 0,
3154                                                bblks - split_bblks,
3155                                                offset + BBTOB(split_bblks));
3156                                if (error)
3157                                        goto bread_err2;
3158                        }
3159
3160                        error = xlog_recover_process(log, rhash, rhead, offset,
3161                                                     pass, &buffer_list);
3162                        if (error)
3163                                goto bread_err2;
3164
3165                        blk_no += bblks;
3166                        rhead_blk = blk_no;
3167                }
3168
3169                ASSERT(blk_no >= log->l_logBBsize);
3170                blk_no -= log->l_logBBsize;
3171                rhead_blk = blk_no;
3172        }
3173
3174        /* read first part of physical log */
3175        while (blk_no < head_blk) {
3176                error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3177                if (error)
3178                        goto bread_err2;
3179
3180                rhead = (xlog_rec_header_t *)offset;
3181                error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3182                if (error)
3183                        goto bread_err2;
3184
3185                /* blocks in data section */
3186                bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3187                error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3188                                   &offset);
3189                if (error)
3190                        goto bread_err2;
3191
3192                error = xlog_recover_process(log, rhash, rhead, offset, pass,
3193                                             &buffer_list);
3194                if (error)
3195                        goto bread_err2;
3196
3197                blk_no += bblks + hblks;
3198                rhead_blk = blk_no;
3199        }
3200
3201 bread_err2:
3202        kmem_free(dbp);
3203 bread_err1:
3204        kmem_free(hbp);
3205
3206        /*
3207         * Submit buffers that have been added from the last record processed,
3208         * regardless of error status.
3209         */
3210        if (!list_empty(&buffer_list))
3211                error2 = xfs_buf_delwri_submit(&buffer_list);
3212
3213        if (error && first_bad)
3214                *first_bad = rhead_blk;
3215
3216        /*
3217         * Transactions are freed at commit time but transactions without commit
3218         * records on disk are never committed. Free any that may be left in the
3219         * hash table.
3220         */
3221        for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3222                struct hlist_node       *tmp;
3223                struct xlog_recover     *trans;
3224
3225                hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3226                        xlog_recover_free_trans(trans);
3227        }
3228
3229        return error ? error : error2;
3230}
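
/*
 * For context: once the record header has wrapped, the pass above lets
 * blk_no run past the physical end of the log and relies on
 * xlog_wrap_logbno() to map it back into range.  A hedged, simplified
 * equivalent for the single-wrap case handled here (the real helper lives
 * in xfs_log_priv.h):
 */
static inline xfs_daddr_t
example_wrap_logbno(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	/* a block that ran past the end maps back to the start of the log */
	if (blk_no >= log->l_logBBsize)
		blk_no -= log->l_logBBsize;
	return blk_no;
}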
3231
3232/*
3233 * Do the recovery of the log.  We actually do this in two phases.
3234 * The two passes are necessary in order to implement the function
3235 * of cancelling a record written into the log.  The first pass
3236 * determines those things which have been cancelled, and the
3237 * second pass replays log items normally except for those which
3238 * have been cancelled.  The handling of the replay and cancellations
3239 * takes place in the log item type specific routines.
3240 *
3241 * The table of items which have cancel records in the log is allocated
3242 * and freed at this level, since only here do we know when all of
3243 * the log recovery has been completed.
3244 */
3245STATIC int
3246xlog_do_log_recovery(
3247        struct xlog     *log,
3248        xfs_daddr_t     head_blk,
3249        xfs_daddr_t     tail_blk)
3250{
3251        int             error, i;
3252
3253        ASSERT(head_blk != tail_blk);
3254
3255        /*
3256         * First do a pass to find all of the cancelled buf log items.
3257         * Store them in the buf_cancel_table for use in the second pass.
3258         */
3259        log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3260                                                 sizeof(struct list_head),
3261                                                 0);
3262        for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3263                INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3264
3265        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3266                                      XLOG_RECOVER_PASS1, NULL);
3267        if (error != 0) {
3268                kmem_free(log->l_buf_cancel_table);
3269                log->l_buf_cancel_table = NULL;
3270                return error;
3271        }
3272        /*
3273         * Then do a second pass to actually recover the items in the log.
3274         * When it is complete free the table of buf cancel items.
3275         */
3276        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3277                                      XLOG_RECOVER_PASS2, NULL);
3278#ifdef DEBUG
3279        if (!error) {
3280                int     i;
3281
3282                for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3283                        ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3284        }
3285#endif  /* DEBUG */
3286
3287        kmem_free(log->l_buf_cancel_table);
3288        log->l_buf_cancel_table = NULL;
3289
3290        return error;
3291}
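
/*
 * For context: the buf cancel table allocated above is a small chained
 * hash keyed on disk block number.  A hedged sketch of the entry shape and
 * bucket lookup, assuming it mirrors the buffer recovery code (the
 * example_ names are illustrative; the canonical definitions live with the
 * buf item recovery code):
 */
struct example_buf_cancel {
	xfs_daddr_t		bc_blkno;	/* block the cancel covers */
	uint			bc_len;		/* length of cancelled range */
	int			bc_refcount;	/* cancel records seen in pass 1 */
	struct list_head	bc_list;	/* hash bucket linkage */
};

static inline struct list_head *
example_buf_cancel_bucket(
	struct xlog	*log,
	xfs_daddr_t	blkno)
{
	return &log->l_buf_cancel_table[(uint64_t)blkno % XLOG_BC_TABLE_SIZE];
}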
3292
3293/*
3294 * Do the actual recovery
3295 */
3296STATIC int
3297xlog_do_recover(
3298        struct xlog             *log,
3299        xfs_daddr_t             head_blk,
3300        xfs_daddr_t             tail_blk)
3301{
3302        struct xfs_mount        *mp = log->l_mp;
3303        struct xfs_buf          *bp = mp->m_sb_bp;
3304        struct xfs_sb           *sbp = &mp->m_sb;
3305        int                     error;
3306
3307        trace_xfs_log_recover(log, head_blk, tail_blk);
3308
3309        /*
3310         * First replay the images in the log.
3311         */
3312        error = xlog_do_log_recovery(log, head_blk, tail_blk);
3313        if (error)
3314                return error;
3315
3316        if (xlog_is_shutdown(log))
3317                return -EIO;
3318
3319        /*
3320         * We now update the tail_lsn since much of the recovery has completed
3321         * and there may be space available to use.  If there were no extent
3322         * or iunlinks, we can free up the entire log and set the tail_lsn to
3323         * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3324         * lsn of the last known good LR on disk.  If there are extent frees
3325         * or iunlinks they will have some entries in the AIL; so we look at
3326         * the AIL to determine how to set the tail_lsn.
3327         */
3328        xlog_assign_tail_lsn(mp);
3329
3330        /*
3331         * Now that we've finished replaying all buffer and inode updates,
3332         * re-read the superblock and reverify it.
3333         */
3334        xfs_buf_lock(bp);
3335        xfs_buf_hold(bp);
3336        error = _xfs_buf_read(bp, XBF_READ);
3337        if (error) {
3338                if (!xlog_is_shutdown(log)) {
3339                        xfs_buf_ioerror_alert(bp, __this_address);
3340                        ASSERT(0);
3341                }
3342                xfs_buf_relse(bp);
3343                return error;
3344        }
3345
3346        /* Convert superblock from on-disk format */
3347        xfs_sb_from_disk(sbp, bp->b_addr);
3348        xfs_buf_relse(bp);
3349
3350        /* re-initialise in-core superblock and geometry structures */
3351        mp->m_features |= xfs_sb_version_to_features(sbp);
3352        xfs_reinit_percpu_counters(mp);
3353        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3354        if (error) {
3355                xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3356                return error;
3357        }
3358        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3359
3360        xlog_recover_check_summary(log);
3361
3362        /* Normal transactions can now occur */
3363        clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3364        return 0;
3365}
3366
3367/*
3368 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3369 *
3370 * Return error or zero.
3371 */
3372int
3373xlog_recover(
3374        struct xlog     *log)
3375{
3376        xfs_daddr_t     head_blk, tail_blk;
3377        int             error;
3378
3379        /* find the tail of the log */
3380        error = xlog_find_tail(log, &head_blk, &tail_blk);
3381        if (error)
3382                return error;
3383
3384        /*
3385         * The superblock was read before the log was available and thus the LSN
3386         * could not be verified. Check the superblock LSN against the current
3387         * LSN now that it's known.
3388         */
3389        if (xfs_has_crc(log->l_mp) &&
3390            !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3391                return -EINVAL;
3392
3393        if (tail_blk != head_blk) {
3394                /* There used to be a comment here:
3395                 *
3396                 * disallow recovery on read-only mounts.  note -- mount
3397                 * checks for ENOSPC and turns it into an intelligent
3398                 * error message.
3399                 * ...but this is no longer true.  Now, unless you specify
3400                 * NORECOVERY (in which case this function would never be
3401                 * called), we just go ahead and recover.  We do this all
3402                 * under the vfs layer, so we can get away with it unless
3403                 * the device itself is read-only, in which case we fail.
3404                 */
3405                error = xfs_dev_is_read_only(log->l_mp, "recovery");
3406                if (error)
3407                        return error;
3408
3409                /*
3410                 * Version 5 superblock log feature mask validation. We know the
3411                 * log is dirty so check if there are any unknown log features
3412                 * in what we need to recover. If there are unknown features
3413                 * (e.g. unsupported transactions), then simply reject the
3414                 * attempt at recovery before touching anything.
3415                 */
3416                if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3417                    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3418                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3419                        xfs_warn(log->l_mp,
3420"Superblock has unknown incompatible log features (0x%x) enabled.",
3421                                (log->l_mp->m_sb.sb_features_log_incompat &
3422                                        XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3423                        xfs_warn(log->l_mp,
3424"The log can not be fully and/or safely recovered by this kernel.");
3425                        xfs_warn(log->l_mp,
3426"Please recover the log on a kernel that supports the unknown features.");
3427                        return -EINVAL;
3428                }
3429
3430                /*
3431                 * Delay log recovery if the debug hook is set. This is debug
3432                 * instrumentation to coordinate simulation of I/O failures with
3433                 * log recovery.
3434                 */
3435                if (xfs_globals.log_recovery_delay) {
3436                        xfs_notice(log->l_mp,
3437                                "Delaying log recovery for %d seconds.",
3438                                xfs_globals.log_recovery_delay);
3439                        msleep(xfs_globals.log_recovery_delay * 1000);
3440                }
3441
3442                xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3443                                log->l_mp->m_logname ? log->l_mp->m_logname
3444                                                     : "internal");
3445
3446                error = xlog_do_recover(log, head_blk, tail_blk);
3447                set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3448        }
3449        return error;
3450}
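
/*
 * For reference, the incompat log feature test used above reduces to a
 * simple mask check against the on-disk superblock.  A minimal sketch; the
 * canonical helper is xfs_sb_has_incompat_log_feature() in xfs_format.h:
 */
static inline bool
example_sb_has_incompat_log_feature(
	struct xfs_sb	*sbp,
	uint32_t	feature)
{
	return (sbp->sb_features_log_incompat & feature) != 0;
}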
3451
3452/*
3453 * In the first part of recovery we replay inodes and buffers and build up the
3454 * list of intents which need to be processed. Here we process the intents and
3455 * clean up the on disk unlinked inode lists. This is separated from the first
3456 * part of recovery so that the root and real-time bitmap inodes can be read in
3457 * from disk in between the two stages.  This is necessary so that we can free
3458 * space in the real-time portion of the file system.
3459 */
3460int
3461xlog_recover_finish(
3462        struct xlog     *log)
3463{
3464        int     error;
3465
3466        error = xlog_recover_process_intents(log);
3467        if (error) {
3468                /*
3469                 * Cancel all the unprocessed intent items now so that we don't
3470                 * leave them pinned in the AIL.  This can cause the AIL to
3471                 * livelock on the pinned item if anyone tries to push the AIL
3472                 * (inode reclaim does this) before we get around to
3473                 * xfs_log_mount_cancel.
3474                 */
3475                xlog_recover_cancel_intents(log);
3476                xfs_alert(log->l_mp, "Failed to recover intents");
3477                xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
3478                return error;
3479        }
3480
3481        /*
3482         * Sync the log to get all the intents out of the AIL.  This isn't
3483         * absolutely necessary, but it helps in case the unlink transactions
3484         * would have problems pushing the intents out of the way.
3485         */
3486        xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3487
3488        /*
3489         * Now that we've recovered the log and all the intents, we can clear
3490         * the log incompat feature bits in the superblock because there's no
3491         * longer anything to protect.  We rely on the AIL push to write out the
3492         * updated superblock after everything else.
3493         */
3494        if (xfs_clear_incompat_log_features(log->l_mp)) {
3495                error = xfs_sync_sb(log->l_mp, false);
3496                if (error < 0) {
3497                        xfs_alert(log->l_mp,
3498        "Failed to clear log incompat features on recovery");
3499                        return error;
3500                }
3501        }
3502
3503        xlog_recover_process_iunlinks(log);
3504        xlog_recover_check_summary(log);
3505        return 0;
3506}
3507
3508void
3509xlog_recover_cancel(
3510        struct xlog     *log)
3511{
3512        if (xlog_recovery_needed(log))
3513                xlog_recover_cancel_intents(log);
3514}
3515
3516#if defined(DEBUG)
3517/*
3518 * Read all of the agf and agi counters; the totals gathered here should
3519 * be consistent with the superblock counters.
3520 */
3521STATIC void
3522xlog_recover_check_summary(
3523        struct xlog             *log)
3524{
3525        struct xfs_mount        *mp = log->l_mp;
3526        struct xfs_perag        *pag;
3527        struct xfs_buf          *agfbp;
3528        struct xfs_buf          *agibp;
3529        xfs_agnumber_t          agno;
3530        uint64_t                freeblks;
3531        uint64_t                itotal;
3532        uint64_t                ifree;
3533        int                     error;
3534
3537        freeblks = 0LL;
3538        itotal = 0LL;
3539        ifree = 0LL;
3540        for_each_perag(mp, agno, pag) {
3541                error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
3542                if (error) {
3543                        xfs_alert(mp, "%s agf read failed agno %d error %d",
3544                                                __func__, pag->pag_agno, error);
3545                } else {
3546                        struct xfs_agf  *agfp = agfbp->b_addr;
3547
3548                        freeblks += be32_to_cpu(agfp->agf_freeblks) +
3549                                    be32_to_cpu(agfp->agf_flcount);
3550                        xfs_buf_relse(agfbp);
3551                }
3552
3553                error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
3554                if (error) {
3555                        xfs_alert(mp, "%s agi read failed agno %d error %d",
3556                                                __func__, pag->pag_agno, error);
3557                } else {
3558                        struct xfs_agi  *agi = agibp->b_addr;
3559
3560                        itotal += be32_to_cpu(agi->agi_count);
3561                        ifree += be32_to_cpu(agi->agi_freecount);
3562                        xfs_buf_relse(agibp);
3563                }
3564        }
3565}
3566#endif /* DEBUG */
3567