linux/fs/xfs/xfs_buf_item.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_inode.h"


kmem_zone_t     *xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void     xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
        struct xfs_buf_log_format *blfp)
{
        return offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
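
/*
 * Illustrative sizing note: assuming 32-bit blf_data_map words, a format
 * with blf_map_size = 1 evaluates to
 * offsetof(struct xfs_buf_log_format, blf_data_map) + 4 bytes, i.e. only
 * the bitmap words actually in use are accounted for, not the full
 * fixed-size in-memory array.
 */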

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
        struct xfs_buf_log_item *bip,
        struct xfs_buf_log_format *blfp,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_buf          *bp = bip->bli_buf;
        int                     next_bit;
        int                     last_bit;

        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
                return;

        /*
         * initial count for a dirty buffer is 2 vectors - the format structure
         * and the first dirty region.
         */
        *nvecs += 2;
        *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, leave the loop,
                 * else if we find a new set of bits bump the number of vecs,
                 * else keep scanning the current set of bits.
                 */
                if (next_bit == -1) {
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
                        last_bit++;
                }
                *nbytes += XFS_BLF_CHUNK;
        }
}
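
/*
 * Worked example: for a dirty bitmap of 0b1100110 the set bits are
 * 1, 2, 5 and 6, i.e. two runs of chunks. Assuming each run is also
 * contiguous in memory, the logic above yields *nvecs += 3 (one format
 * structure plus two dirty regions), and *nbytes grows by the format
 * size plus 4 * XFS_BLF_CHUNK, one chunk per set bit.
 */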

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                *nvecs += bip->bli_format_count;
                for (i = 0; i < bip->bli_format_count; i++) {
                        *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
                }
                return;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

        if (bip->bli_flags & XFS_BLI_ORDERED) {
                /*
                 * The buffer has been logged just to order it.
                 * It is not being included in the transaction
                 * commit, so no vectors are used at all.
                 */
                trace_xfs_buf_item_size_ordered(bip);
                *nvecs = XFS_LOG_VEC_ORDERED;
                return;
        }

        /*
         * the vector count is based on the number of buffer vectors we have
         * dirty bits in. This will only be greater than one when we have a
         * compound buffer with more than one segment dirty. Hence for compound
         * buffers we need to track which segment the dirty bits correspond to,
         * and when we move from one segment to the next increment the vector
         * count for the extra buf log format structure that will need to be
         * written.
         */
        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
                                          nvecs, nbytes);
        }
        trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        struct xfs_buf          *bp,
        uint                    offset,
        int                     first_bit,
        uint                    nbits)
{
        offset += first_bit * XFS_BLF_CHUNK;
        xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
                        xfs_buf_offset(bp, offset),
                        nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
        struct xfs_buf          *bp,
        uint                    offset,
        int                     next_bit,
        int                     last_bit)
{
        return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
                (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
                 XFS_BLF_CHUNK);
}
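
/*
 * Note: next_bit << XFS_BLF_SHIFT converts a bitmap bit number into a
 * byte offset, as each bit covers one XFS_BLF_CHUNK of the buffer. Two
 * logically adjacent chunks of a multi-page buffer may still sit in
 * different pages, in which case xfs_buf_offset() returns non-adjacent
 * addresses and the chunks must be copied as separate iovecs.
 */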

static void
xfs_buf_item_format_segment(
        struct xfs_buf_log_item *bip,
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        uint                    offset,
        struct xfs_buf_log_format *blfp)
{
        struct xfs_buf  *bp = bip->bli_buf;
        uint            base_size;
        int             first_bit;
        int             last_bit;
        int             next_bit;
        uint            nbits;

        /* copy the flags across from the base format item */
        blfp->blf_flags = bip->__bli_format.blf_flags;

        /*
         * Base size is the actual size of the ondisk structure - it reflects
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
        base_size = xfs_buf_log_format_size(blfp);

        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
                /*
                 * If the map is not dirty in the transaction, mark
                 * the size as zero and do not advance the vector pointer.
                 */
                return;
        }

        blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
        blfp->blf_size = 1;

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
                return;
        }


        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits fill in the last iovec and get out of
                 * the loop.  Else if we start a new set of bits then fill in
                 * the iovec for the series we were looking at and start
                 * counting the bits in the new one.  Else we're still in the
                 * same set of bits so just keep counting and scanning.
                 */
                if (next_bit == -1) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        break;
                } else if (next_bit != last_bit + 1 ||
                           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        struct xfs_log_iovec    *vecp = NULL;
        uint                    offset = 0;
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
        ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
               (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
                && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));


        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state.
         *
         * For buffer based inode allocation, we do not transfer
         * this state if the inode buffer allocation has not yet been committed
         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
         * correct replay of the inode allocation.
         *
         * For icreate item based inode allocation, the buffers aren't written
         * to the journal during allocation, and hence we should always tag the
         * buffer as an inode buffer so that the correct unlinked list replay
         * occurs during recovery.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
                    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                                            &bip->bli_formats[i]);
                offset += BBTOB(bp->b_maps[i].bm_len);
        }

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        xfs_buf_t       *bp = bip->bli_buf;
        struct xfs_ail  *ailp = lip->li_ailp;
        int             stale = bip->bli_flags & XFS_BLI_STALE;
        int             freed;

        ASSERT(bp->b_fspriv == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If we
                         * don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (lip->li_desc)
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the buffer,
                         * the buffer should no longer refer to the transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may
                 * or may not have the item on the AIL. xfs_trans_ail_delete()
                 * will take care of that situation.
                 * xfs_trans_ail_delete() drops the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_do_callbacks(bp);
                        bp->b_fspriv = NULL;
                        bp->b_iodone = NULL;
                } else {
                        spin_lock(&ailp->xa_lock);
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_fspriv == NULL);
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
                /*
                 * There are currently two references to the buffer - the active
                 * LRU reference and the buf log item. What we are about to do
                 * here - simulate a failed IO completion - requires 3
                 * references.
                 *
                 * The LRU reference is removed by the xfs_buf_stale() call. The
                 * buf item reference is removed by the xfs_buf_iodone()
                 * callback that is run by xfs_buf_do_callbacks() during ioend
                 * processing (via the bp->b_iodone callback), and then finally
                 * the ioend processing will drop the IO reference if the buffer
                 * is marked XBF_ASYNC.
                 *
                 * Hence we need to take an additional reference here so that IO
                 * completion processing doesn't free the buffer prematurely.
                 */
                xfs_buf_lock(bp);
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp);
        }
}

/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */

static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
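
/*
 * The DEFINE_RATELIMIT_STATE() arguments are (name, interval, burst):
 * at most 10 messages per 30 * HZ jiffies here, matching the comment
 * above. ___ratelimit() returns non-zero while the message budget for
 * the current interval has not yet been exhausted.
 */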

STATIC uint
xfs_buf_item_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    rval = XFS_ITEM_SUCCESS;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp)) {
                /*
                 * If we have just raced with a buffer being pinned and it has
                 * been marked stale, we could end up stalling until someone else
                 * issues a log force to unpin the stale buffer. Check for the
                 * race condition here so xfsaild recognizes the buffer is pinned
                 * and queues a log force to move it along.
                 */
                if (xfs_buf_ispinned(bp))
                        return XFS_ITEM_PINNED;
                return XFS_ITEM_LOCKED;
        }

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_push(bip);

        /* has a previous flush failed due to IO errors? */
        if ((bp->b_flags & XBF_WRITE_FAIL) &&
            ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
                xfs_warn(bp->b_target->bt_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
                         (long long)bp->b_bn);
        }

        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
        return rval;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        bool                    aborted = !!(lip->li_flags & XFS_LI_ABORTED);
        bool                    hold = !!(bip->bli_flags & XFS_BLI_HOLD);
        bool                    dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
#if defined(DEBUG) || defined(XFS_WARN)
        bool                    ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
#endif

        /* Clear the buffer's association with this transaction. */
        bp->b_transp = NULL;

        /*
         * The per-transaction state has been copied above so clear it from the
         * bli.
         */
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

        /*
         * If the buf item is marked stale, then don't do anything.  We'll
         * unlock the buffer and free the buf item when the buffer is unpinned
         * for the last time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
                }
        }

        trace_xfs_buf_item_unlock(bip);

        /*
         * If the buf item isn't tracking any data, free it, otherwise drop the
         * reference we hold to it. If we are aborting the transaction, this may
         * be the only reference to the buf item, so we free it anyway
         * regardless of whether it is dirty or not. A dirty abort implies a
         * shutdown, anyway.
         *
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));

        /*
         * Clean buffers, by definition, cannot be in the AIL. However, aborted
         * buffers may be in the AIL regardless of dirty state. An aborted
         * transaction that invalidates a buffer already in the AIL may have
         * marked it stale and cleared the dirty state, for example.
         *
         * Therefore if we are aborting a buffer and we've just taken the last
         * reference away, we have to check if it is in the AIL before freeing
         * it. We need to free it in this case, because an aborted transaction
         * has already shut the filesystem down and this is the last chance we
         * will have to do so.
         */
        if (atomic_dec_and_test(&bip->bli_refcount)) {
                if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
                        xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                } else if (!dirty)
                        xfs_buf_item_relse(bp);
        }

        if (!hold)
                xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}

STATIC void
xfs_buf_item_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size       = xfs_buf_item_size,
        .iop_format     = xfs_buf_item_format,
        .iop_pin        = xfs_buf_item_pin,
        .iop_unpin      = xfs_buf_item_unpin,
        .iop_unlock     = xfs_buf_item_unlock,
        .iop_committed  = xfs_buf_item_committed,
        .iop_push       = xfs_buf_item_push,
        .iop_committing = xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
        struct xfs_buf_log_item *bip,
        int                     count)
{
        ASSERT(bip->bli_formats == NULL);
        bip->bli_format_count = count;

        if (count == 1) {
                bip->bli_formats = &bip->__bli_format;
                return 0;
        }

        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                                KM_SLEEP);
        if (!bip->bli_formats)
                return -ENOMEM;
        return 0;
}

STATIC void
xfs_buf_item_free_format(
        struct xfs_buf_log_item *bip)
{
        if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fspriv field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
int
xfs_buf_item_init(
        struct xfs_buf  *bp,
        struct xfs_mount *mp)
{
        struct xfs_log_item     *lip = bp->b_fspriv;
        struct xfs_buf_log_item *bip;
        int                     chunks;
        int                     map_size;
        int                     error;
        int                     i;

        /*
         * Check to see if there is already a buf log item for
         * this buffer.  If there is, it is guaranteed to be
         * the first.  If we do already have one, there is
         * nothing to do here so return.
         */
        ASSERT(bp->b_target->bt_mount == mp);
        if (lip != NULL && lip->li_type == XFS_LI_BUF)
                return 0;

        bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
         * can be divided into. Make sure not to truncate any pieces.
         * map_size is the size of the bitmap needed to describe the
         * chunks of the buffer.
         *
         * Discontiguous buffer support follows the layout of the underlying
         * buffer. This makes the implementation as simple as possible.
         */
        error = xfs_buf_item_get_format(bip, bp->b_map_count);
        ASSERT(error == 0);
        if (error) {    /* to stop gcc throwing set-but-unused warnings */
                kmem_zone_free(xfs_buf_item_zone, bip);
                return error;
        }


        for (i = 0; i < bip->bli_format_count; i++) {
                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                                      XFS_BLF_CHUNK);
                map_size = DIV_ROUND_UP(chunks, NBWORD);

                bip->bli_formats[i].blf_type = XFS_LI_BUF;
                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
                bip->bli_formats[i].blf_map_size = map_size;
        }

        /*
         * Put the buf item into the list of items attached to the
         * buffer at the front.
         */
        if (bp->b_fspriv)
                bip->bli_item.li_bio_list = bp->b_fspriv;
        bp->b_fspriv = bip;
        xfs_buf_hold(bp);
        return 0;
}
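
/*
 * Sizing example: a single-segment buffer of 8 basic blocks is
 * BBTOB(8) == 4096 bytes, so assuming 128 byte chunks (XFS_BLF_CHUNK),
 * chunks = DIV_ROUND_UP(4096, 128) = 32, and with 32-bit bitmap words
 * (NBWORD) map_size = DIV_ROUND_UP(32, 32) = 1, i.e. a single word
 * tracks the dirty state of the whole buffer.
 */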


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
        uint                    first,
        uint                    last,
        uint                    *map)
{
        uint            first_bit;
        uint            last_bit;
        uint            bits_to_set;
        uint            bits_set;
        uint            word_num;
        uint            *wordp;
        uint            bit;
        uint            end_bit;
        uint            mask;

        /*
         * Convert byte offsets to bit numbers.
         */
        first_bit = first >> XFS_BLF_SHIFT;
        last_bit = last >> XFS_BLF_SHIFT;

        /*
         * Calculate the total number of bits to be set.
         */
        bits_to_set = last_bit - first_bit + 1;

        /*
         * Get a pointer to the first word in the bitmap
         * to set a bit in.
         */
        word_num = first_bit >> BIT_TO_WORD_SHIFT;
        wordp = &map[word_num];

        /*
         * Calculate the starting bit in the first word.
         */
        bit = first_bit & (uint)(NBWORD - 1);

        /*
         * First set any bits in the first word of our range.
         * If it starts at bit 0 of the word, it will be
         * set below rather than here.  That is what the variable
         * bit tells us. The variable bits_set tracks the number
         * of bits that have been set so far.  End_bit is the number
         * of the last bit to be set in this word plus one.
         */
        if (bit) {
                end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
                mask = ((1U << (end_bit - bit)) - 1) << bit;
                *wordp |= mask;
                wordp++;
                bits_set = end_bit - bit;
        } else {
                bits_set = 0;
        }

        /*
         * Now set bits a whole word at a time that are between
         * first_bit and last_bit.
         */
        while ((bits_to_set - bits_set) >= NBWORD) {
                *wordp |= 0xffffffff;
                bits_set += NBWORD;
                wordp++;
        }

        /*
         * Finally, set any bits left to be set in one last partial word.
         */
        end_bit = bits_to_set - bits_set;
        if (end_bit) {
                mask = (1U << end_bit) - 1;
                *wordp |= mask;
        }
}
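
/*
 * Worked example: logging bytes 256 through 511, assuming 128 byte
 * chunks, gives first_bit = 2 and last_bit = 3, so bits_to_set = 2.
 * With 32-bit map words that makes word_num = 0 and bit = 2, so the
 * first-word pass sets mask = ((1U << 2) - 1) << 2 = 0xc and both the
 * whole-word loop and the final partial word have nothing left to set.
 */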

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
        xfs_buf_log_item_t      *bip,
        uint                    first,
        uint                    last)
{
        int                     i;
        uint                    start;
        uint                    end;
        struct xfs_buf          *bp = bip->bli_buf;

        /*
         * walk each buffer segment and mark them dirty appropriately.
         */
        start = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
                if (start > last)
                        break;
                end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

                /* skip to the map that includes the first byte to log */
                if (first > end) {
                        start += BBTOB(bp->b_maps[i].bm_len);
                        continue;
                }

                /*
                 * Trim the range to this segment and mark it in the bitmap.
                 * Note that we must convert buffer offsets to segment relative
                 * offsets (e.g., the first byte of each segment is byte 0 of
                 * that segment).
                 */
                if (first < start)
                        first = start;
                if (end > last)
                        end = last;
                xfs_buf_item_log_segment(first - start, end - start,
                                         &bip->bli_formats[i].blf_data_map[0]);

                start += BBTOB(bp->b_maps[i].bm_len);
        }
}
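
/*
 * Segment walk example: for a two-segment buffer whose maps are each
 * 8 basic blocks (BBTOB(8) == 4096 bytes), logging first = 4000 and
 * last = 4200 marks bytes 4000-4095 of segment 0, then rebases against
 * start = 4096 and marks bytes 0-104 of segment 1.
 */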


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
        struct xfs_buf_log_item *bip)
{
        int                     i;

        for (i = 0; i < bip->bli_format_count; i++) {
                if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
                             bip->bli_formats[i].blf_map_size))
                        return true;
        }

        return false;
}

STATIC void
xfs_buf_item_free(
        xfs_buf_log_item_t      *bip)
{
        xfs_buf_item_free_format(bip);
        kmem_free(bip->bli_item.li_lv_shadow);
        kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;

        trace_xfs_buf_item_relse(bp, _RET_IP_);
        ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

        bp->b_fspriv = bip->bli_item.li_bio_list;
        if (bp->b_fspriv == NULL)
                bp->b_iodone = NULL;

        xfs_buf_rele(bp);
        xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fspriv.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
        xfs_buf_t       *bp,
        void            (*cb)(xfs_buf_t *, xfs_log_item_t *),
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *head_lip;

        ASSERT(xfs_buf_islocked(bp));

        lip->li_cb = cb;
        head_lip = bp->b_fspriv;
        if (head_lip) {
                lip->li_bio_list = head_lip->li_bio_list;
                head_lip->li_bio_list = lip;
        } else {
                bp->b_fspriv = lip;
        }

        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);
        bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip;

        while ((lip = bp->b_fspriv) != NULL) {
                bp->b_fspriv = lip->li_bio_list;
                ASSERT(lip->li_cb != NULL);
                /*
                 * Clear the next pointer so we don't have any
                 * confusion if the item is added to another buf.
                 * Don't touch the log item after calling its
                 * callback, because it could have freed itself.
                 */
                lip->li_bio_list = NULL;
                lip->li_cb(bp, lip);
        }
}

/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *next;
        struct xfs_log_item     *lip = bp->b_fspriv;
        struct xfs_ail          *ailp = lip->li_ailp;

        spin_lock(&ailp->xa_lock);
        for (; lip; lip = next) {
                next = lip->li_bio_list;
                if (lip->li_ops->iop_error)
                        lip->li_ops->iop_error(lip, bp);
        }
        spin_unlock(&ailp->xa_lock);
}

static bool
xfs_buf_iodone_callback_error(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip = bp->b_fspriv;
        struct xfs_mount        *mp = lip->li_mountp;
        static ulong            lasttime;
        static xfs_buftarg_t    *lasttarg;
        struct xfs_error_cfg    *cfg;

        /*
         * If we've already decided to shutdown the filesystem because of
         * I/O errors, there's no point in giving this a retry.
         */
        if (XFS_FORCED_SHUTDOWN(mp))
                goto out_stale;

        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
                xfs_buf_ioerror_alert(bp, __func__);
        }
        lasttarg = bp->b_target;

        /* synchronous writes will have callers process the error */
        if (!(bp->b_flags & XBF_ASYNC))
                goto out_stale;

        trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
        ASSERT(bp->b_iodone != NULL);

        cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

        /*
         * If the write was asynchronous then no one will be looking for the
         * error.  If this is the first failure of this type, clear the error
         * state and write the buffer out again. This means we always retry an
         * async write failure at least once, but we also need to set the buffer
         * up to behave correctly now for repeated failures.
         */
        if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
             bp->b_last_error != bp->b_error) {
                bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
                bp->b_last_error = bp->b_error;
                if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
                    !bp->b_first_retry_time)
                        bp->b_first_retry_time = jiffies;

                xfs_buf_ioerror(bp, 0);
                xfs_buf_submit(bp);
                return true;
        }

        /*
         * Repeated failure on an async write. Take action according to the
         * error configuration we have been set up to use.
         */

        if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
            ++bp->b_retries > cfg->max_retries)
                goto permanent_error;
        if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
            time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
                goto permanent_error;

        /* At unmount we may treat errors differently */
        if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
                goto permanent_error;

        /*
         * Still a transient error, run IO completion failure callbacks and let
         * the higher layers retry the buffer.
         */
        xfs_buf_do_callbacks_fail(bp);
        xfs_buf_ioerror(bp, 0);
        xfs_buf_relse(bp);
        return true;

        /*
         * Permanent error - we need to trigger a shutdown if we haven't already
         * to indicate that inconsistency will result from this action.
         */
permanent_error:
        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
        xfs_buf_stale(bp);
        bp->b_flags |= XBF_DONE;
        trace_xfs_buf_error_relse(bp, _RET_IP_);
        return false;
}

/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
        struct xfs_buf          *bp)
{
        /*
         * If there is an error, process it. Some errors require us
         * to run callbacks after failure processing is done so we
         * detect that and take appropriate action.
         */
        if (bp->b_error && xfs_buf_iodone_callback_error(bp))
                return;

        /*
         * Successful IO or permanent error. Either way, we can clear the
         * retry state here in preparation for the next error that may occur.
         */
        bp->b_last_error = 0;
        bp->b_retries = 0;
        bp->b_first_retry_time = 0;

        xfs_buf_do_callbacks(bp);
        bp->b_fspriv = NULL;
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail          *ailp = lip->li_ailp;

        ASSERT(BUF_ITEM(lip)->bli_buf == bp);

        xfs_buf_rele(bp);

        /*
         * If we are forcibly shutting down, this may well be
         * off the AIL already. That's because we simulate the
         * log-committed callbacks to unpin these buffers. Or we may never
         * have put this item on the AIL because the transaction was
         * aborted forcibly. xfs_trans_ail_delete() takes care of these.
         *
         * Either way, AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
        xfs_buf_item_free(BUF_ITEM(lip));
}

/*
 * Requeue a failed buffer for writeback
 *
 * Return true if the buffer has been re-queued properly, false otherwise
 */
bool
xfs_buf_resubmit_failed_buffers(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        struct xfs_log_item     *next;

        /*
         * Clear XFS_LI_FAILED flag from all items before resubmit
         *
         * XFS_LI_FAILED set/clear is protected by xa_lock; the caller of
         * this function must already have it acquired.
         */
        for (; lip; lip = next) {
                next = lip->li_bio_list;
                xfs_clear_li_failed(lip);
        }

        /* Add this buffer back to the delayed write list */
        return xfs_buf_delwri_queue(bp, buffer_list);
}
1250