linux/fs/xfs/xfs_refcount_item.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t     *xfs_cui_zone;
kmem_zone_t     *xfs_cud_zone;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cui_log_item, cui_item);
}

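/*
 * Free a CUI.  CUIs with more than XFS_CUI_MAX_FAST_EXTENTS extents
 * were allocated directly from the heap; everything else came from the
 * xfs_cui_zone cache.
 */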
STATIC void
xfs_cui_item_free(
        struct xfs_cui_log_item *cuip)
{
        if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
                kmem_free(cuip);
        else
                kmem_cache_free(xfs_cui_zone, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
        struct xfs_cui_log_item *cuip)
{
        ASSERT(atomic_read(&cuip->cui_refcount) > 0);
        if (atomic_dec_and_test(&cuip->cui_refcount)) {
                xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
                xfs_cui_item_free(cuip);
        }
}
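
/*
 * Note on the CUI lifecycle: xfs_cui_init() starts the reference count
 * at 2.  One reference belongs to the log and is dropped when the CUI
 * is unpinned; the other belongs to whoever committed the CUI and is
 * dropped either by the CUD that finishes the work or, if the
 * transaction is cancelled, by ->iop_release().
 */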
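/*
 * Report the number of log iovecs and the amount of log space needed
 * to log the given cui item.
 */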
STATIC void
xfs_cui_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        *nvecs += 1;
        *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;

        ASSERT(atomic_read(&cuip->cui_next_extent) ==
                        cuip->cui_format.cui_nextents);

        cuip->cui_format.cui_type = XFS_LI_CUI;
        cuip->cui_format.cui_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
                        xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
        struct xfs_log_item     *lip)
{
        xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
        struct xfs_mount                *mp,
        uint                            nextents)
{
        struct xfs_cui_log_item         *cuip;

        ASSERT(nextents > 0);
        if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
                cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
        else
                cuip = kmem_cache_zalloc(xfs_cui_zone,
                                         GFP_KERNEL | __GFP_NOFAIL);

        xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
        cuip->cui_format.cui_nextents = nextents;
        cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
        atomic_set(&cuip->cui_next_extent, 0);
        atomic_set(&cuip->cui_refcount, 2);

        return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cud_log_item, cud_item);
}

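/*
 * Report the number of log iovecs and the amount of log space needed
 * to log the given cud item.
 */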
STATIC void
xfs_cud_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        *nvecs += 1;
        *nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 * Unlike the CUI, the CUD carries no extent slots of its own.
 */
STATIC void
xfs_cud_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;

        cudp->cud_format.cud_type = XFS_LI_CUD;
        cudp->cud_format.cud_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
                        sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is released when the transaction is either committed or cancelled.
 * In both cases, drop our reference to the CUI and free the CUD.
 */
STATIC void
xfs_cud_item_release(
        struct xfs_log_item     *lip)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);

        xfs_cui_release(cudp->cud_cuip);
        kmem_cache_free(xfs_cud_zone, cudp);
}

static const struct xfs_item_ops xfs_cud_item_ops = {
        .flags          = XFS_ITEM_RELEASE_WHEN_COMMITTED,
        .iop_size       = xfs_cud_item_size,
        .iop_format     = xfs_cud_item_format,
        .iop_release    = xfs_cud_item_release,
};

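/*
 * Allocate a CUD, attach it to the given transaction, and point it at
 * the CUI whose work it will complete.
 */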
static struct xfs_cud_log_item *
xfs_trans_get_cud(
        struct xfs_trans                *tp,
        struct xfs_cui_log_item         *cuip)
{
        struct xfs_cud_log_item         *cudp;

        cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL);
        xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
                          &xfs_cud_item_ops);
        cudp->cud_cuip = cuip;
        cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

        xfs_trans_add_item(tp, &cudp->cud_item);
        return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
        struct xfs_trans                *tp,
        struct xfs_cud_log_item         *cudp,
        enum xfs_refcount_intent_type   type,
        xfs_fsblock_t                   startblock,
        xfs_extlen_t                    blockcount,
        xfs_fsblock_t                   *new_fsb,
        xfs_extlen_t                    *new_len,
        struct xfs_btree_cur            **pcur)
{
        int                             error;

        error = xfs_refcount_finish_one(tp, type, startblock,
                        blockcount, new_fsb, new_len, pcur);

        /*
         * Mark the transaction dirty, even on error. This ensures the
         * transaction is aborted, which:
         *
         * 1.) releases the CUI and frees the CUD
         * 2.) shuts down the filesystem
         */
        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

        return error;
}

/* Sort refcount intents by AG so that we lock the AGs in ascending order. */
static int
xfs_refcount_update_diff_items(
        void                            *priv,
        struct list_head                *a,
        struct list_head                *b)
{
        struct xfs_mount                *mp = priv;
        struct xfs_refcount_intent      *ra;
        struct xfs_refcount_intent      *rb;

        ra = container_of(a, struct xfs_refcount_intent, ri_list);
        rb = container_of(b, struct xfs_refcount_intent, ri_list);
        return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
                XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
        struct xfs_phys_extent          *refc,
        enum xfs_refcount_intent_type   type)
{
        refc->pe_flags = 0;
        switch (type) {
        case XFS_REFCOUNT_INCREASE:
        case XFS_REFCOUNT_DECREASE:
        case XFS_REFCOUNT_ALLOC_COW:
        case XFS_REFCOUNT_FREE_COW:
                refc->pe_flags |= type;
                break;
        default:
                ASSERT(0);
        }
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
        struct xfs_trans                *tp,
        struct xfs_cui_log_item         *cuip,
        struct xfs_refcount_intent      *refc)
{
        uint                            next_extent;
        struct xfs_phys_extent          *ext;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

        /*
         * atomic_inc_return gives us the value after the increment;
         * we want to use it as an array index so we need to subtract 1 from
         * it.
         */
        next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
        ASSERT(next_extent < cuip->cui_format.cui_nextents);
        ext = &cuip->cui_format.cui_extents[next_extent];
        ext->pe_startblock = refc->ri_startblock;
        ext->pe_len = refc->ri_blockcount;
        xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

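/*
 * Create a CUI covering all of the deferred refcount updates on the
 * list, add it to the transaction, and log each update into it.
 */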
static struct xfs_log_item *
xfs_refcount_update_create_intent(
        struct xfs_trans                *tp,
        struct list_head                *items,
        unsigned int                    count,
        bool                            sort)
{
        struct xfs_mount                *mp = tp->t_mountp;
        struct xfs_cui_log_item         *cuip = xfs_cui_init(mp, count);
        struct xfs_refcount_intent      *refc;

        ASSERT(count > 0);

        xfs_trans_add_item(tp, &cuip->cui_item);
        if (sort)
                list_sort(mp, items, xfs_refcount_update_diff_items);
        list_for_each_entry(refc, items, ri_list)
                xfs_refcount_update_log_item(tp, cuip, refc);
        return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
        struct xfs_trans                *tp,
        struct xfs_log_item             *intent,
        unsigned int                    count)
{
        return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
        struct xfs_trans                *tp,
        struct xfs_log_item             *done,
        struct list_head                *item,
        struct xfs_btree_cur            **state)
{
        struct xfs_refcount_intent      *refc;
        xfs_fsblock_t                   new_fsb;
        xfs_extlen_t                    new_aglen;
        int                             error;

        refc = container_of(item, struct xfs_refcount_intent, ri_list);
        error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
                        refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
                        &new_fsb, &new_aglen, state);

        /* Did we run out of reservation?  Requeue what we didn't finish. */
        if (!error && new_aglen > 0) {
                ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
                       refc->ri_type == XFS_REFCOUNT_DECREASE);
                refc->ri_startblock = new_fsb;
                refc->ri_blockcount = new_aglen;
                return -EAGAIN;
        }
        kmem_free(refc);
        return error;
}

/* Abort a pending CUI. */
STATIC void
xfs_refcount_update_abort_intent(
        struct xfs_log_item             *intent)
{
        xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
        struct list_head                *item)
{
        struct xfs_refcount_intent      *refc;

        refc = container_of(item, struct xfs_refcount_intent, ri_list);
        kmem_free(refc);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
        .max_items      = XFS_CUI_MAX_FAST_EXTENTS,
        .create_intent  = xfs_refcount_update_create_intent,
        .abort_intent   = xfs_refcount_update_abort_intent,
        .create_done    = xfs_refcount_update_create_done,
        .finish_item    = xfs_refcount_update_finish_item,
        .finish_cleanup = xfs_refcount_finish_one_cleanup,
        .cancel_item    = xfs_refcount_update_cancel_item,
};
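
/*
 * Roughly speaking, the deferred-ops machinery drives the hooks above
 * as follows: ->create_intent logs the CUI, ->create_done logs the CUD
 * in the transaction that does the actual work, ->finish_item performs
 * one refcount update (returning -EAGAIN to requeue a partially
 * finished extent), and ->abort_intent/->cancel_item clean up when the
 * chain is abandoned.
 */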

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
        struct xfs_log_item             *lip,
        struct list_head                *capture_list)
{
        struct xfs_bmbt_irec            irec;
        struct xfs_cui_log_item         *cuip = CUI_ITEM(lip);
        struct xfs_phys_extent          *refc;
        struct xfs_cud_log_item         *cudp;
        struct xfs_trans                *tp;
        struct xfs_btree_cur            *rcur = NULL;
        struct xfs_mount                *mp = lip->li_mountp;
        xfs_fsblock_t                   startblock_fsb;
        xfs_fsblock_t                   new_fsb;
        xfs_extlen_t                    new_len;
        unsigned int                    refc_type;
        bool                            op_ok;
        bool                            requeue_only = false;
        enum xfs_refcount_intent_type   type;
        int                             i;
        int                             error = 0;

        /*
         * First check the validity of the extents described by the
         * CUI.  If any are bad, then assume that all are bad and
         * just toss the CUI.
         */
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                startblock_fsb = XFS_BB_TO_FSB(mp,
                                   XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
                switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
                case XFS_REFCOUNT_INCREASE:
                case XFS_REFCOUNT_DECREASE:
                case XFS_REFCOUNT_ALLOC_COW:
                case XFS_REFCOUNT_FREE_COW:
                        op_ok = true;
                        break;
                default:
                        op_ok = false;
                        break;
                }
                if (!op_ok || startblock_fsb == 0 ||
                    refc->pe_len == 0 ||
                    startblock_fsb >= mp->m_sb.sb_dblocks ||
                    refc->pe_len >= mp->m_sb.sb_agblocks ||
                    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS))
                        return -EFSCORRUPTED;
        }

        /*
         * Under normal operation, refcount updates are deferred, so we
         * wouldn't be adding them directly to a transaction.  All
         * refcount updates manage reservation usage internally and
         * dynamically by deferring work that won't fit in the
         * transaction.  Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update.  However, we're in log recovery here, so we
         * use the passed in defer_ops to finish up any work that
         * doesn't fit.  We need to reserve enough blocks to handle a
         * full btree split on either end of the refcount range.
         */
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
                        mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        cudp = xfs_trans_get_cud(tp, cuip);

        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
                switch (refc_type) {
                case XFS_REFCOUNT_INCREASE:
                case XFS_REFCOUNT_DECREASE:
                case XFS_REFCOUNT_ALLOC_COW:
                case XFS_REFCOUNT_FREE_COW:
                        type = refc_type;
                        break;
                default:
                        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                        error = -EFSCORRUPTED;
                        goto abort_error;
                }
                if (requeue_only) {
                        new_fsb = refc->pe_startblock;
                        new_len = refc->pe_len;
                } else {
                        error = xfs_trans_log_finish_refcount_update(tp, cudp,
                                type, refc->pe_startblock, refc->pe_len,
                                &new_fsb, &new_len, &rcur);
                }
                if (error)
                        goto abort_error;

                /* Requeue what we didn't finish. */
                if (new_len > 0) {
                        irec.br_startblock = new_fsb;
                        irec.br_blockcount = new_len;
                        switch (type) {
                        case XFS_REFCOUNT_INCREASE:
                                xfs_refcount_increase_extent(tp, &irec);
                                break;
                        case XFS_REFCOUNT_DECREASE:
                                xfs_refcount_decrease_extent(tp, &irec);
                                break;
                        case XFS_REFCOUNT_ALLOC_COW:
                                xfs_refcount_alloc_cow_extent(tp,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        case XFS_REFCOUNT_FREE_COW:
                                xfs_refcount_free_cow_extent(tp,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        default:
                                ASSERT(0);
                        }
                        requeue_only = true;
                }
        }

        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        xfs_trans_cancel(tp);
        return error;
}

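/* Match CUIs in the AIL by intent id; used during log recovery. */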
STATIC bool
xfs_cui_item_match(
        struct xfs_log_item     *lip,
        uint64_t                intent_id)
{
        return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
        struct xfs_log_item             *intent,
        struct xfs_trans                *tp)
{
        struct xfs_cud_log_item         *cudp;
        struct xfs_cui_log_item         *cuip;
        struct xfs_phys_extent          *extp;
        unsigned int                    count;

        count = CUI_ITEM(intent)->cui_format.cui_nextents;
        extp = CUI_ITEM(intent)->cui_format.cui_extents;

        tp->t_flags |= XFS_TRANS_DIRTY;
        cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
        set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

        cuip = xfs_cui_init(tp->t_mountp, count);
        memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
        atomic_set(&cuip->cui_next_extent, count);
        xfs_trans_add_item(tp, &cuip->cui_item);
        set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
        return &cuip->cui_item;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
        .iop_size       = xfs_cui_item_size,
        .iop_format     = xfs_cui_item_format,
        .iop_unpin      = xfs_cui_item_unpin,
        .iop_release    = xfs_cui_item_release,
        .iop_recover    = xfs_cui_item_recover,
        .iop_match      = xfs_cui_item_match,
        .iop_relog      = xfs_cui_item_relog,
};

/*
 * Copy a CUI format buffer from the given buf, and into the destination
 * CUI format structure.  The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_cui_copy_format(
        struct xfs_log_iovec            *buf,
        struct xfs_cui_log_format       *dst_cui_fmt)
{
        struct xfs_cui_log_format       *src_cui_fmt;
        uint                            len;

        src_cui_fmt = buf->i_addr;
        len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

        if (buf->i_len == len) {
                memcpy(dst_cui_fmt, src_cui_fmt, len);
                return 0;
        }
        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
        return -EFSCORRUPTED;
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
        struct xlog_recover_item        *item,
        xfs_lsn_t                       lsn)
{
        int                             error;
        struct xfs_mount                *mp = log->l_mp;
        struct xfs_cui_log_item         *cuip;
        struct xfs_cui_log_format       *cui_formatp;

        cui_formatp = item->ri_buf[0].i_addr;

        cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
        error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
        if (error) {
                xfs_cui_item_free(cuip);
                return error;
        }
        atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
        /*
         * Insert the intent into the AIL directly and drop one reference so
         * that finishing or canceling the work will drop the other.
         */
        xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
        xfs_cui_release(cuip);
        return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
        .item_type              = XFS_LI_CUI,
        .commit_pass2           = xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
        struct xlog_recover_item        *item,
        xfs_lsn_t                       lsn)
{
        struct xfs_cud_log_format       *cud_formatp;

        cud_formatp = item->ri_buf[0].i_addr;
        if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
                return -EFSCORRUPTED;
        }

        xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
        return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
        .item_type              = XFS_LI_CUD,
        .commit_pass2           = xlog_recover_cud_commit_pass2,
};