linux/fs/xfs/xfs_rmap_item.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

struct kmem_cache       *xfs_rui_cache;
struct kmem_cache       *xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

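/* Convert a generic log item back to the RUI that embeds it. */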
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_rui_log_item, rui_item);
}

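/*
 * Free the RUI and its shadow log vector buffer.  Items with more extents
 * than XFS_RUI_MAX_FAST_EXTENTS were allocated from the heap rather than the
 * slab cache, so free them accordingly.
 */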
STATIC void
xfs_rui_item_free(
        struct xfs_rui_log_item *ruip)
{
        kmem_free(ruip->rui_item.li_lv_shadow);
        if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
                kmem_free(ruip);
        else
                kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
        struct xfs_rui_log_item *ruip)
{
        ASSERT(atomic_read(&ruip->rui_refcount) > 0);
        if (!atomic_dec_and_test(&ruip->rui_refcount))
                return;

        xfs_trans_ail_delete(&ruip->rui_item, 0);
        xfs_rui_item_free(ruip);
}

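/*
 * Return the number of log iovecs and the number of bytes needed to log the
 * given rui item.
 */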
STATIC void
xfs_rui_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_rui_log_item *ruip = RUI_ITEM(lip);

        *nvecs += 1;
        *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;

        ASSERT(atomic_read(&ruip->rui_next_extent) ==
                        ruip->rui_format.rui_nextents);

        ruip->rui_format.rui_type = XFS_LI_RUI;
        ruip->rui_format.rui_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
                        xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_rui_log_item *ruip = RUI_ITEM(lip);

        xfs_rui_release(ruip);
}

/*
 * The RUI has either been committed or aborted by the time this is called.
 * If the transaction was cancelled, an RUD isn't going to be constructed,
 * so free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
        struct xfs_log_item     *lip)
{
        xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
        struct xfs_mount                *mp,
        uint                            nextents)
{
        struct xfs_rui_log_item         *ruip;

        ASSERT(nextents > 0);
        if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
                ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
        else
                ruip = kmem_cache_zalloc(xfs_rui_cache,
                                         GFP_KERNEL | __GFP_NOFAIL);

        xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
        ruip->rui_format.rui_nextents = nextents;
        ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
        atomic_set(&ruip->rui_next_extent, 0);
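        /*
         * Two references: one dropped when the log is done with the intent
         * (unpin or release), the other when the work is finished or aborted
         * (RUD release or abort_intent).
         */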
        atomic_set(&ruip->rui_refcount, 2);

        return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
        struct xfs_log_iovec            *buf,
        struct xfs_rui_log_format       *dst_rui_fmt)
{
        struct xfs_rui_log_format       *src_rui_fmt;
        uint                            len;

        src_rui_fmt = buf->i_addr;
        len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

        if (buf->i_len != len) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
                return -EFSCORRUPTED;
        }

        memcpy(dst_rui_fmt, src_rui_fmt, len);
        return 0;
}

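/* Convert a generic log item back to the RUD that embeds it. */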
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_rud_log_item, rud_item);
}

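/*
 * Return the number of log iovecs and the number of bytes needed to log the
 * given rud item.
 */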
STATIC void
xfs_rud_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        *nvecs += 1;
        *nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
        struct xfs_log_iovec    *vecp = NULL;

        rudp->rud_format.rud_type = XFS_LI_RUD;
        rudp->rud_format.rud_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
                        sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD has either been committed or aborted by the time this is called.
 * Either way, drop our reference to the RUI and free the RUD.
 */
STATIC void
xfs_rud_item_release(
        struct xfs_log_item     *lip)
{
        struct xfs_rud_log_item *rudp = RUD_ITEM(lip);

        xfs_rui_release(rudp->rud_ruip);
        kmem_free(rudp->rud_item.li_lv_shadow);
        kmem_cache_free(xfs_rud_cache, rudp);
}

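/* Return the RUI intent item that this RUD completes. */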
static struct xfs_log_item *
xfs_rud_item_intent(
        struct xfs_log_item     *lip)
{
        return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
        .flags          = XFS_ITEM_RELEASE_WHEN_COMMITTED |
                          XFS_ITEM_INTENT_DONE,
        .iop_size       = xfs_rud_item_size,
        .iop_format     = xfs_rud_item_format,
        .iop_release    = xfs_rud_item_release,
        .iop_intent     = xfs_rud_item_intent,
};

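/*
 * Allocate an RUD, point it at the given RUI, and attach it to the
 * transaction.
 */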
static struct xfs_rud_log_item *
xfs_trans_get_rud(
        struct xfs_trans                *tp,
        struct xfs_rui_log_item         *ruip)
{
        struct xfs_rud_log_item         *rudp;

        rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
        xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
                          &xfs_rud_item_ops);
        rudp->rud_ruip = ruip;
        rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

        xfs_trans_add_item(tp, &rudp->rud_item);
        return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
        struct xfs_map_extent           *rmap,
        enum xfs_rmap_intent_type       type,
        int                             whichfork,
        xfs_exntst_t                    state)
{
        rmap->me_flags = 0;
        if (state == XFS_EXT_UNWRITTEN)
                rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
        if (whichfork == XFS_ATTR_FORK)
                rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
        switch (type) {
        case XFS_RMAP_MAP:
                rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
                break;
        case XFS_RMAP_MAP_SHARED:
                rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
                break;
        case XFS_RMAP_UNMAP:
                rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
                break;
        case XFS_RMAP_UNMAP_SHARED:
                rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
                break;
        case XFS_RMAP_CONVERT:
                rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
                break;
        case XFS_RMAP_CONVERT_SHARED:
                rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
                break;
        case XFS_RMAP_ALLOC:
                rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
                break;
        case XFS_RMAP_FREE:
                rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
                break;
        default:
                ASSERT(0);
        }
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails, in
 * order to support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
        struct xfs_trans                *tp,
        struct xfs_rud_log_item         *rudp,
        enum xfs_rmap_intent_type       type,
        uint64_t                        owner,
        int                             whichfork,
        xfs_fileoff_t                   startoff,
        xfs_fsblock_t                   startblock,
        xfs_filblks_t                   blockcount,
        xfs_exntst_t                    state,
        struct xfs_btree_cur            **pcur)
{
        int                             error;

        error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
                        startblock, blockcount, state, pcur);

        /*
         * Mark the transaction dirty, even on error. This ensures the
         * transaction is aborted, which:
         *
         * 1.) releases the RUI and frees the RUD
         * 2.) shuts down the filesystem
         */
        tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
        set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

        return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
        void                            *priv,
        const struct list_head          *a,
        const struct list_head          *b)
{
        struct xfs_mount                *mp = priv;
        struct xfs_rmap_intent          *ra;
        struct xfs_rmap_intent          *rb;

        ra = container_of(a, struct xfs_rmap_intent, ri_list);
        rb = container_of(b, struct xfs_rmap_intent, ri_list);
        return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
                XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
        struct xfs_trans                *tp,
        struct xfs_rui_log_item         *ruip,
        struct xfs_rmap_intent          *rmap)
{
        uint                            next_extent;
        struct xfs_map_extent           *map;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

        /*
         * atomic_inc_return gives us the value after the increment;
         * we want to use it as an array index so we need to subtract 1 from
         * it.
         */
        next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
        ASSERT(next_extent < ruip->rui_format.rui_nextents);
        map = &ruip->rui_format.rui_extents[next_extent];
        map->me_owner = rmap->ri_owner;
        map->me_startblock = rmap->ri_bmap.br_startblock;
        map->me_startoff = rmap->ri_bmap.br_startoff;
        map->me_len = rmap->ri_bmap.br_blockcount;
        xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
                        rmap->ri_bmap.br_state);
}

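/* Create an RUI covering the given rmap update items and log them into it. */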
static struct xfs_log_item *
xfs_rmap_update_create_intent(
        struct xfs_trans                *tp,
        struct list_head                *items,
        unsigned int                    count,
        bool                            sort)
{
        struct xfs_mount                *mp = tp->t_mountp;
        struct xfs_rui_log_item         *ruip = xfs_rui_init(mp, count);
        struct xfs_rmap_intent          *rmap;

        ASSERT(count > 0);

        xfs_trans_add_item(tp, &ruip->rui_item);
        if (sort)
                list_sort(mp, items, xfs_rmap_update_diff_items);
        list_for_each_entry(rmap, items, ri_list)
                xfs_rmap_update_log_item(tp, ruip, rmap);
        return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
        struct xfs_trans                *tp,
        struct xfs_log_item             *intent,
        unsigned int                    count)
{
        return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
        struct xfs_trans                *tp,
        struct xfs_log_item             *done,
        struct list_head                *item,
        struct xfs_btree_cur            **state)
{
        struct xfs_rmap_intent          *rmap;
        int                             error;

        rmap = container_of(item, struct xfs_rmap_intent, ri_list);
        error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
                        rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
                        rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
                        rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
                        state);
        kmem_cache_free(xfs_rmap_intent_cache, rmap);
        return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
        struct xfs_log_item     *intent)
{
        xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
        struct list_head                *item)
{
        struct xfs_rmap_intent          *rmap;

        rmap = container_of(item, struct xfs_rmap_intent, ri_list);
        kmem_cache_free(xfs_rmap_intent_cache, rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
        .max_items      = XFS_RUI_MAX_FAST_EXTENTS,
        .create_intent  = xfs_rmap_update_create_intent,
        .abort_intent   = xfs_rmap_update_abort_intent,
        .create_done    = xfs_rmap_update_create_done,
        .finish_item    = xfs_rmap_update_finish_item,
        .finish_cleanup = xfs_rmap_finish_one_cleanup,
        .cancel_item    = xfs_rmap_update_cancel_item,
};

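/*
 * Rough sketch of how the hooks above fit together (see xfs_defer.c for the
 * authoritative sequencing): callers queue xfs_rmap_intent records on the
 * transaction's deferred-work list; at commit time the deferral machinery
 * calls ->create_intent to log an RUI covering the pending items, rolls the
 * transaction, obtains an RUD via ->create_done, and then applies each item
 * through ->finish_item, which updates the rmapbt and logs the RUD.
 */
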
/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
        struct xfs_mount                *mp,
        struct xfs_map_extent           *rmap)
{
        if (!xfs_has_rmapbt(mp))
                return false;

        if (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
                return false;

        switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
        case XFS_RMAP_EXTENT_MAP:
        case XFS_RMAP_EXTENT_MAP_SHARED:
        case XFS_RMAP_EXTENT_UNMAP:
        case XFS_RMAP_EXTENT_UNMAP_SHARED:
        case XFS_RMAP_EXTENT_CONVERT:
        case XFS_RMAP_EXTENT_CONVERT_SHARED:
        case XFS_RMAP_EXTENT_ALLOC:
        case XFS_RMAP_EXTENT_FREE:
                break;
        default:
                return false;
        }

        if (!XFS_RMAP_NON_INODE_OWNER(rmap->me_owner) &&
            !xfs_verify_ino(mp, rmap->me_owner))
                return false;

        if (!xfs_verify_fileext(mp, rmap->me_startoff, rmap->me_len))
                return false;

        return xfs_verify_fsbext(mp, rmap->me_startblock, rmap->me_len);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
        struct xfs_log_item             *lip,
        struct list_head                *capture_list)
{
        struct xfs_rui_log_item         *ruip = RUI_ITEM(lip);
        struct xfs_map_extent           *rmap;
        struct xfs_rud_log_item         *rudp;
        struct xfs_trans                *tp;
        struct xfs_btree_cur            *rcur = NULL;
        struct xfs_mount                *mp = lip->li_log->l_mp;
        enum xfs_rmap_intent_type       type;
        xfs_exntst_t                    state;
        int                             i;
        int                             whichfork;
        int                             error = 0;

        /*
         * First check the validity of the extents described by the
         * RUI.  If any are bad, then assume that all are bad and
         * just toss the RUI.
         */
        for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
                if (!xfs_rui_validate_map(mp,
                                        &ruip->rui_format.rui_extents[i])) {
                        XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                        &ruip->rui_format,
                                        sizeof(ruip->rui_format));
                        return -EFSCORRUPTED;
                }
        }

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
                        mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;
        rudp = xfs_trans_get_rud(tp, ruip);

        for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
                rmap = &ruip->rui_format.rui_extents[i];
                state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
                                XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
                whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
                                XFS_ATTR_FORK : XFS_DATA_FORK;
                switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
                case XFS_RMAP_EXTENT_MAP:
                        type = XFS_RMAP_MAP;
                        break;
                case XFS_RMAP_EXTENT_MAP_SHARED:
                        type = XFS_RMAP_MAP_SHARED;
                        break;
                case XFS_RMAP_EXTENT_UNMAP:
                        type = XFS_RMAP_UNMAP;
                        break;
                case XFS_RMAP_EXTENT_UNMAP_SHARED:
                        type = XFS_RMAP_UNMAP_SHARED;
                        break;
                case XFS_RMAP_EXTENT_CONVERT:
                        type = XFS_RMAP_CONVERT;
                        break;
                case XFS_RMAP_EXTENT_CONVERT_SHARED:
                        type = XFS_RMAP_CONVERT_SHARED;
                        break;
                case XFS_RMAP_EXTENT_ALLOC:
                        type = XFS_RMAP_ALLOC;
                        break;
                case XFS_RMAP_EXTENT_FREE:
                        type = XFS_RMAP_FREE;
                        break;
                default:
                        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
                        error = -EFSCORRUPTED;
                        goto abort_error;
                }
                error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
                                rmap->me_owner, whichfork,
                                rmap->me_startoff, rmap->me_startblock,
                                rmap->me_len, state, &rcur);
                if (error == -EFSCORRUPTED)
                        XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
                                        rmap, sizeof(*rmap));
                if (error)
                        goto abort_error;
        }

        xfs_rmap_finish_one_cleanup(tp, rcur, error);
        return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
        xfs_rmap_finish_one_cleanup(tp, rcur, error);
        xfs_trans_cancel(tp);
        return error;
}

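/* Match an RUI by its intent id, used by log recovery to cancel it. */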
STATIC bool
xfs_rui_item_match(
        struct xfs_log_item     *lip,
        uint64_t                intent_id)
{
        return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
        struct xfs_log_item             *intent,
        struct xfs_trans                *tp)
{
        struct xfs_rud_log_item         *rudp;
        struct xfs_rui_log_item         *ruip;
        struct xfs_map_extent           *extp;
        unsigned int                    count;

        count = RUI_ITEM(intent)->rui_format.rui_nextents;
        extp = RUI_ITEM(intent)->rui_format.rui_extents;

        tp->t_flags |= XFS_TRANS_DIRTY;
        rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
        set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

        ruip = xfs_rui_init(tp->t_mountp, count);
        memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
        atomic_set(&ruip->rui_next_extent, count);
        xfs_trans_add_item(tp, &ruip->rui_item);
        set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
        return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
        .flags          = XFS_ITEM_INTENT,
        .iop_size       = xfs_rui_item_size,
        .iop_format     = xfs_rui_item_format,
        .iop_unpin      = xfs_rui_item_unpin,
        .iop_release    = xfs_rui_item_release,
        .iop_recover    = xfs_rui_item_recover,
        .iop_match      = xfs_rui_item_match,
        .iop_relog      = xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
        struct xlog_recover_item        *item,
        xfs_lsn_t                       lsn)
{
        int                             error;
        struct xfs_mount                *mp = log->l_mp;
        struct xfs_rui_log_item         *ruip;
        struct xfs_rui_log_format       *rui_formatp;

        rui_formatp = item->ri_buf[0].i_addr;

        ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
        error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
        if (error) {
                xfs_rui_item_free(ruip);
                return error;
        }
        atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
        /*
         * Insert the intent into the AIL directly and drop one reference so
         * that finishing or canceling the work will drop the other.
         */
        xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
        xfs_rui_release(ruip);
        return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
        .item_type              = XFS_LI_RUI,
        .commit_pass2           = xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
        struct xlog                     *log,
        struct list_head                *buffer_list,
        struct xlog_recover_item        *item,
        xfs_lsn_t                       lsn)
{
        struct xfs_rud_log_format       *rud_formatp;

        rud_formatp = item->ri_buf[0].i_addr;
        ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

        xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
        return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
        .item_type              = XFS_LI_RUD,
        .commit_pass2           = xlog_recover_rud_commit_pass2,
};