linux/fs/xfs/xfs_bmap_util.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        if (XFS_IS_REALTIME_INODE(ip))
                return XFS_FSB_TO_BB(ip->i_mount, fsb);
        return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
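
/*
 * Example: for a data device inode the fsb encodes an AG number plus an
 * AG-relative block, so XFS_FSB_TO_DADDR has to decompose it first, while
 * a realtime inode's fsb is already a linear block number on the rt
 * device and only needs the block-to-basic-block shift.  This is how
 * xfs_getbmap_report_one() below fills in the mapping's disk address:
 *
 *      p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
 */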

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode *ip,
        xfs_fsblock_t   start_fsb,
        xfs_off_t       count_fsb)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_daddr_t     sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t        block = XFS_BB_TO_FSBT(mp, sector);

        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, 0);
}
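
/*
 * Worked example, assuming a 4096-byte block size (s_blocksize_bits == 12):
 * the shift of (12 - 9) == 3 converts fs blocks into the 512-byte sector
 * units that blkdev_issue_zeroout() expects, i.e. one fsb covers
 * 1 << 3 == 8 sectors, so zeroing 4 fsbs issues a 32-sector (16 KiB)
 * zeroout request.
 */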

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    mod = 0;        /* offset/length alignment remainder */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        div_u64_rem(ap->offset, align, &mod);
        if (mod || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
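
        /*
         * Example: with sb_rextsize == 16 fsbs, ralen is capped at
         * MAXEXTLEN / 16 rtextents, so the resulting length in fs blocks
         * still fits in the 21-bit on-disk extent length field (see the
         * MAXEXTLEN note in xfs_alloc_file_space() below).
         */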

        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, ap->wasdel, prod, &rtb);
        if (error)
                return error;

        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

                /* Zero the extent if we were asked to do so */
                if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
                        if (error)
                                return error;
                }
        } else {
                ap->length = 0;
        }
        return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
        struct xfs_ifork        *ifp,
        xfs_filblks_t           *count)
{
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    got;
        xfs_extnum_t            numrecs = 0;

        for_each_xfs_iext(ifp, &icur, &got) {
                if (!isnullstartblock(got.br_startblock)) {
                        *count += got.br_blockcount;
                        numrecs++;
                }
        }

        return numrecs;
}
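
/*
 * Delalloc extents are recognisable by their "null" start block
 * (isnullstartblock() returns true) because no disk space has been
 * allocated for them yet, which is why the loop above leaves them out of
 * both totals.
 */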

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        xfs_filblks_t           *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
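 *
 * Walk sketch: at each interior level the loop first counts that level's
 * sibling blocks by following bb_rightsib, then recurses down the leftmost
 * child pointer; once it reaches the record-bearing blocks it walks their
 * siblings, summing each record's block count via
 * xfs_bmap_disk_count_leaves().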
 */
STATIC int
xfs_bmap_count_tree(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_ifork        *ifp,
        xfs_fsblock_t           blockno,
        int                     levelin,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        int                     error;
        struct xfs_buf          *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
                                count);
                if (error) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        (*nextents) += numrecs;
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        struct xfs_mount        *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */
        struct xfs_btree_block  *block; /* current btree block */
        struct xfs_ifork        *ifp;   /* fork structure */
        xfs_fsblock_t           bno;    /* block # of "block" */
        int                     level;  /* btree level, for checking */
        int                     error;

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        *nextents = 0;
        *count = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (!ifp)
                return 0;

        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_EXTENTS:
                *nextents = xfs_bmap_count_leaves(ifp, count);
                return 0;
        case XFS_DINODE_FMT_BTREE:
                if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                        error = xfs_iread_extents(tp, ip, whichfork);
                        if (error)
                                return error;
                }

                /*
                 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
                 */
                block = ifp->if_broot;
                level = be16_to_cpu(block->bb_level);
                ASSERT(level > 0);
                pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
                bno = be64_to_cpu(*pp);
                ASSERT(bno != NULLFSBLOCK);
                ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
                ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

                error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
                                nextents, count);
                if (error) {
                        XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
                                        XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                return 0;
        }

        return 0;
}

static int
xfs_getbmap_report_one(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        struct xfs_bmbt_irec    *got)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;
        bool                    shared = false;
        int                     error;

        error = xfs_reflink_trim_around_shared(ip, got, &shared);
        if (error)
                return error;

        if (isnullstartblock(got->br_startblock) ||
            got->br_startblock == DELAYSTARTBLOCK) {
                /*
                 * Delalloc extents that start beyond EOF can occur due to
                 * speculative EOF allocation when the delalloc extent is larger
                 * than the largest freespace extent at conversion time.  These
                 * extents cannot be converted by data writeback, so can exist
                 * here even if we are not supposed to be finding delalloc
                 * extents.
                 */
                if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
                        ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

                p->bmv_oflags |= BMV_OF_DELALLOC;
                p->bmv_block = -2;
        } else {
                p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
        }

        if (got->br_state == XFS_EXT_UNWRITTEN &&
            (bmv->bmv_iflags & BMV_IF_PREALLOC))
                p->bmv_oflags |= BMV_OF_PREALLOC;

        if (shared)
                p->bmv_oflags |= BMV_OF_SHARED;

        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
        return 0;
}

static void
xfs_getbmap_report_hole(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        xfs_fileoff_t           bno,
        xfs_fileoff_t           end)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;

        if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
                return;

        p->bmv_block = -1;
        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
        struct getbmapx         *bmv)
{
        return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
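
/*
 * Note: bmv_count counts the caller's array slots including the header
 * entry itself (per the getbmapx interface), which is why one slot is
 * held back with the "- 1" above.
 */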

static bool
xfs_getbmap_next_rec(
        struct xfs_bmbt_irec    *rec,
        xfs_fileoff_t           total_end)
{
        xfs_fileoff_t           end = rec->br_startoff + rec->br_blockcount;

        if (end == total_end)
                return false;

        rec->br_startoff += rec->br_blockcount;
        if (!isnullstartblock(rec->br_startblock) &&
            rec->br_startblock != DELAYSTARTBLOCK)
                rec->br_startblock += rec->br_blockcount;
        rec->br_blockcount = total_end - end;
        return true;
}
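
/*
 * Example of the shared/unshared split this enables: if a single bmbt
 * record covers file blocks 0-99 but xfs_reflink_trim_around_shared()
 * finds only 0-49 shared, xfs_getbmap_report_one() reports 0-49 first and
 * this helper then advances the record to 50-99 so the unshared tail gets
 * its own getbmapx entry.
 */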

/*
 * Get the inode's extents as described in bmv, and format them for output.
 * Entries are filled in the out array until all extents are mapped, until
 * the passed-in bmv->bmv_count slots have been filled, or until an error
 * occurs.
 */
int                                             /* error code */
xfs_getbmap(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        struct kgetbmap         *out)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     iflags = bmv->bmv_iflags;
        int                     whichfork, lock, error = 0;
        int64_t                 bmv_end, max_len;
        xfs_fileoff_t           bno, first_bno;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_irec    got, rec;
        xfs_filblks_t           len;
        struct xfs_iext_cursor  icur;

        if (bmv->bmv_iflags & ~BMV_IF_VALID)
                return -EINVAL;
#ifndef DEBUG
        /* Only allow CoW fork queries if we're debugging. */
        if (iflags & BMV_IF_COWFORK)
                return -EINVAL;
#endif
        if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
                return -EINVAL;

        if (bmv->bmv_length < -1)
                return -EINVAL;
        bmv->bmv_entries = 0;
        if (bmv->bmv_length == 0)
                return 0;

        if (iflags & BMV_IF_ATTRFORK)
                whichfork = XFS_ATTR_FORK;
        else if (iflags & BMV_IF_COWFORK)
                whichfork = XFS_COW_FORK;
        else
                whichfork = XFS_DATA_FORK;
        ifp = XFS_IFORK_PTR(ip, whichfork);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        switch (whichfork) {
        case XFS_ATTR_FORK:
                if (!XFS_IFORK_Q(ip))
                        goto out_unlock_iolock;

                max_len = 1LL << 32;
                lock = xfs_ilock_attr_map_shared(ip);
                break;
        case XFS_COW_FORK:
                /* No CoW fork? Just return */
                if (!ifp)
                        goto out_unlock_iolock;

                if (xfs_get_cowextsz_hint(ip))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = XFS_ILOCK_SHARED;
                xfs_ilock(ip, lock);
                break;
        case XFS_DATA_FORK:
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                if (xfs_get_extsz_hint(ip) ||
                    (ip->i_d.di_flags &
                     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = xfs_ilock_data_map_shared(ip);
                break;
        }

        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                break;
        case XFS_DINODE_FMT_LOCAL:
                /* Local format inode forks report no extents. */
                goto out_unlock_ilock;
        default:
                error = -EINVAL;
                goto out_unlock_ilock;
        }

        if (bmv->bmv_length == -1) {
                max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
                bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
        }

        bmv_end = bmv->bmv_offset + bmv->bmv_length;

        first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
        len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, whichfork);
                if (error)
                        goto out_unlock_ilock;
        }

        if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
                /*
                 * Report a whole-file hole if the delalloc flag is set to
                 * stay compatible with the old implementation.
                 */
                if (iflags & BMV_IF_DELALLOC)
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                goto out_unlock_ilock;
        }

        while (!xfs_getbmap_full(bmv)) {
                xfs_trim_extent(&got, first_bno, len);

                /*
                 * Report an entry for a hole if this extent doesn't directly
                 * follow the previous one.
                 */
                if (got.br_startoff > bno) {
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        got.br_startoff);
                        if (xfs_getbmap_full(bmv))
                                break;
                }

                /*
                 * In order to report shared extents accurately, we report each
                 * distinct shared / unshared part of a single bmbt record with
                 * an individual getbmapx record.
                 */
                bno = got.br_startoff + got.br_blockcount;
                rec = got;
                do {
                        error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
                                        &rec);
                        if (error || xfs_getbmap_full(bmv))
                                goto out_unlock_ilock;
                } while (xfs_getbmap_next_rec(&rec, bno));

                if (!xfs_iext_next_extent(ifp, &icur, &got)) {
                        xfs_fileoff_t   end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

                        out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

                        if (whichfork != XFS_ATTR_FORK && bno < end &&
                            !xfs_getbmap_full(bmv)) {
                                xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
                                                bno, end);
                        }
                        break;
                }

                if (bno >= first_bno + len)
                        break;
        }

out_unlock_ilock:
        xfs_iunlock(ip, lock);
out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        struct xfs_ifork        *ifp = &ip->i_df;
        xfs_fileoff_t           end_fsb = start_fsb + length;
        struct xfs_bmbt_irec    got, del;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        ASSERT(ifp->if_flags & XFS_IFEXTENTS);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;

        while (got.br_startoff + got.br_blockcount > start_fsb) {
                del = got;
                xfs_trim_extent(&del, start_fsb, length);

                /*
                 * A delete can push the cursor forward. Step back to the
                 * previous extent on non-delalloc or extents outside the
                 * target range.
                 */
                if (!del.br_blockcount ||
                    !isnullstartblock(del.br_startblock)) {
                        if (!xfs_iext_prev_extent(ifp, &icur, &got))
                                break;
                        continue;
                }

                error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
                                                  &got, &del);
                if (error || !xfs_iext_get_extent(ifp, &icur, &got))
                        break;
        }

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}
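
/*
 * Typical caller pattern (compare xfs_prepare_shift() below):
 *
 *      if (xfs_can_free_eofblocks(ip, true)) {
 *              error = xfs_free_eofblocks(ip);
 *              if (error)
 *                      return error;
 *      }
 */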

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           end_fsb;
        xfs_fileoff_t           last_fsb;
        xfs_filblks_t           map_len;
        int                     nimaps;
        struct xfs_bmbt_irec    imap;
        struct xfs_mount        *mp = ip->i_mount;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /*
         * If there are blocks after the end of file, truncate the file to its
         * current size to free them up.
         */
        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip);
                if (error)
                        return error;

                /* wait on dio to ensure i_size has settled */
                inode_dio_wait(VFS_I(ip));

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size.  If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (ie NULL files bug).
                 */
                error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
                                        XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp);
                } else {
                        error = xfs_trans_commit(tp);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        uint                    qblocks, resblks, resrtextents;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
        allocatesize_fsb = endoffset_fsb - startoffset_fsb;

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        div_u64_rem(startoffset_fsb, extsz, &temp);
                        if (temp)
                                e += temp;
                        div_u64_rem(e, extsz, &temp);
                        if (temp)
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
                                resrtextents, 0, &tp);

                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, resblks,
                                        imapp, &nimaps);
                if (error)
                        goto error0;

                /*
                 * Complete the transaction
                 */
                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* unlock inode, unreserve quota blocks, cancel trans */
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        if (error) {
                ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
                        ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);
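        /*
         * The literal 2 below is the maximum number of extents to unmap
         * in this transaction; the caller, xfs_free_file_space(), keeps
         * calling until *done is set, so a large range is punched out as
         * a series of small transactions.
         */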
        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
        if (error)
                goto out_trans_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

int
xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        xfs_off_t               rounding, start, end;
        int                     error;

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(inode);

        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        start = round_down(offset, rounding);
        end = round_up(offset + len, rounding) - 1;

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;
        truncate_pagecache_range(inode, start, end);
        return 0;
}
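
/*
 * Worked example, assuming 4096-byte blocks and 4 KiB pages (so
 * rounding == 4096): offset == 6000, len == 3000 gives
 * start == round_down(6000, 4096) == 4096 and
 * end == round_up(9000, 4096) - 1 == 12287, i.e. whole pages around the
 * byte range are written back and invalidated.
 */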

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        error = xfs_flush_unmap_range(ip, offset, len);
        if (error)
                return error;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /*
         * Need to zero the stuff we're not freeing, on disk.
         */
        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks, we'll have to zero out any
         * partial block at the beginning and/or end.  iomap_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
        if (offset >= XFS_ISIZE(ip))
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
        error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
        if (error)
                return error;

        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                round_down(offset + len, PAGE_SIZE), LLONG_MAX);
        }

        return error;
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    blksize;
        int                     error;

        trace_xfs_zero_file_space(ip);

        blksize = 1 << mp->m_sb.sb_blocklog;

        /*
         * Punch a hole and prealloc the range. We use hole punch rather than
         * unwritten extent conversion for two reasons:
         *
         * 1.) Hole punch handles partial block zeroing for us.
         *
         * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
         * by virtue of the hole punch.
         */
        error = xfs_free_file_space(ip, offset, len);
        if (error || xfs_is_always_cow_inode(ip))
                return error;

        return xfs_alloc_file_space(ip, round_down(offset, blksize),
                                     round_up(offset + len, blksize) -
                                     round_down(offset, blksize),
                                     XFS_BMAPI_PREALLOC);
}
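
/*
 * Usage note: the round_down/round_up pair above widens the preallocation
 * to whole blocks; the partial blocks at either end were already zeroed on
 * disk by the hole punch, so the net effect is a fully allocated, fully
 * zeroed range with fallocate-like semantics.
 */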

static int
xfs_prepare_shift(
        struct xfs_inode        *ip,
        loff_t                  offset)
{
        int                     error;

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(ip);
                if (error)
                        return error;
        }

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
        if (error)
                return error;

        /*
         * Clean out anything hanging around in the cow fork now that
         * we've flushed all the dirty data out to disk to avoid having
         * CoW extents at the wrong offsets.
         */
        if (xfs_inode_has_cow_data(ip)) {
                error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
                                true);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * xfs_collapse_file_space()
 *      This routine frees disk space and shifts extents for the given file.
 *      The first thing we do is free the data blocks in the specified range
 *      by calling xfs_free_file_space(), which also syncs dirty data
 *      and invalidates the page cache over the region on which the collapse
 *      range is working. Then extent records are shifted left to cover the hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 *
 */
int
xfs_collapse_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           next_fsb = XFS_B_TO_FSB(mp, offset + len);
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_collapse_file_space(ip);

        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        while (!error && !done) {
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
                                        &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot, resblks, 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_trans_cancel;
                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

                error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
                                &done);
                if (error)
                        goto out_trans_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * xfs_insert_file_space()
 *      This routine creates hole space by shifting extents for the given file.
 *      The first thing we do is sync dirty data and invalidate the page cache
 *      over the region on which the insert range is working. Then we split an
 *      extent into two extents at the given offset by calling
 *      xfs_bmap_split_extent, and shift all extent records lying between
 *      [offset, last allocated extent] to the right to create the hole range.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_insert_file_space(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           stop_fsb = XFS_B_TO_FSB(mp, offset);
        xfs_fileoff_t           next_fsb = NULLFSBLOCK;
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_insert_file_space(ip);

        error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        /*
         * The extent shifting code works on extent granularity. So, if stop_fsb
         * is not the starting block of extent, we need to split the extent at
         * stop_fsb.
         */
        error = xfs_bmap_split_extent(ip, stop_fsb);
        if (error)
                return error;

        while (!error && !done) {
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
                                        &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
                error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb);
                if (error)
                        goto out_trans_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
        struct xfs_inode        *ip,    /* target inode */
        struct xfs_inode        *tip)   /* tmp inode */
{

        /* Should never get a local format */
        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
            tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
                return -EINVAL;

        /*
         * If the target inode has fewer extents than the temporary inode,
         * then why did userspace call us?
         */
        if (ip->i_d.di_nextents < tip->i_d.di_nextents)
                return -EINVAL;

        /*
         * If we have to use the (expensive) rmap swap method, we can
         * handle any number of extents and any format.
         */
        if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
                return 0;

        /*
         * If the target inode is in extent form and the temp inode is in btree
         * form then we will end up with the target inode in the wrong format,
         * as we already know there are fewer extents in the temp inode.
1399         */
1400        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1401            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1402                return -EINVAL;
1403
1404        /* Check temp in extent form to max in target */
1405        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1406            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1407                        XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1408                return -EINVAL;
1409
1410        /* Check target in extent form to max in temp */
1411        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1412            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1413                        XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1414                return -EINVAL;
1415
1416        /*
1417         * If we are in a btree format, check that the temp root block will fit
1418         * in the target and that it has enough extents to be in btree format
1419         * in the target.
1420         *
1421         * Note that we have to be careful to allow btree->extent conversions
1422         * (a common defrag case) which will occur when the temp inode is in
1423         * extent format...
1424         */
1425        if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1426                if (XFS_IFORK_Q(ip) &&
1427                    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1428                        return -EINVAL;
1429                if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1430                    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1431                        return -EINVAL;
1432        }
1433
1434        /* Reciprocal target->temp btree format checks */
1435        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1436                if (XFS_IFORK_Q(tip) &&
1437                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1438                        return -EINVAL;
1439                if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1440                    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1441                        return -EINVAL;
1442        }
1443
1444        return 0;
1445}
1446
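    /*
     * Flush dirty data and invalidate the entire page cache of an inode in
     * preparation for an extent swap, and verify the cache stays empty.
     */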
1447static int
1448xfs_swap_extent_flush(
1449        struct xfs_inode        *ip)
1450{
1451        int     error;
1452
1453        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1454        if (error)
1455                return error;
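            /* Toss all cached pages so nothing stale survives the swap. */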
1456        truncate_pagecache_range(VFS_I(ip), 0, -1);
1457
1458        /* Verify that the flush and truncate left no pages in the cache */
1459        if (VFS_I(ip)->i_mapping->nrpages)
1460                return -EINVAL;
1461        return 0;
1462}
1463
1464/*
1465 * Move extents from one file to another when rmap is enabled.
1466 */
1467STATIC int
1468xfs_swap_extent_rmap(
1469        struct xfs_trans                **tpp,
1470        struct xfs_inode                *ip,
1471        struct xfs_inode                *tip)
1472{
1473        struct xfs_trans                *tp = *tpp;
1474        struct xfs_bmbt_irec            irec;
1475        struct xfs_bmbt_irec            uirec;
1476        struct xfs_bmbt_irec            tirec;
1477        xfs_fileoff_t                   offset_fsb;
1478        xfs_fileoff_t                   end_fsb;
1479        xfs_filblks_t                   count_fsb;
1480        int                             error;
1481        xfs_filblks_t                   ilen;
1482        xfs_filblks_t                   rlen;
1483        int                             nimaps;
1484        uint64_t                        tip_flags2;
1485
1486        /*
1487         * If the source file has shared blocks, we must flag the donor
1488         * file as having shared blocks so that we get the shared-block
1489         * rmap functions when we go to fix up the rmaps.  The flags
1490         * will be switched for real later.
1491         */
1492        tip_flags2 = tip->i_d.di_flags2;
1493        if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1494                tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1495
1496        offset_fsb = 0;
1497        end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1498        count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1499
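            /*
             * Walk the donor file's data fork one mapping at a time; each
             * piece is unmapped from both files and cross-mapped into the
             * other via deferred bmap work items.
             */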
1500        while (count_fsb) {
1501                /* Read extent from the donor file */
1502                nimaps = 1;
1503                error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1504                                &nimaps, 0);
1505                if (error)
1506                        goto out;
1507                ASSERT(nimaps == 1);
1508                ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1509
1510                trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1511                ilen = tirec.br_blockcount;
1512
1513                /* Unmap the old blocks in the source file. */
1514                while (tirec.br_blockcount) {
1515                        ASSERT(tp->t_firstblock == NULLFSBLOCK);
1516                        trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1517
1518                        /* Read extent from the source file */
1519                        nimaps = 1;
1520                        error = xfs_bmapi_read(ip, tirec.br_startoff,
1521                                        tirec.br_blockcount, &irec,
1522                                        &nimaps, 0);
1523                        if (error)
1524                                goto out;
1525                        ASSERT(nimaps == 1);
1526                        ASSERT(tirec.br_startoff == irec.br_startoff);
1527                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1528
1529                        /* Trim the extent. */
1530                        uirec = tirec;
1531                        uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1532                                        tirec.br_blockcount,
1533                                        irec.br_blockcount);
1534                        trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1535
1536                        /* Remove the mapping from the donor file. */
1537                        xfs_bmap_unmap_extent(tp, tip, &uirec);
1538
1539                        /* Remove the mapping from the source file. */
1540                        xfs_bmap_unmap_extent(tp, ip, &irec);
1541
1542                        /* Map the donor file's blocks into the source file. */
1543                        xfs_bmap_map_extent(tp, ip, &uirec);
1544
1545                        /* Map the source file's blocks into the donor file. */
1546                        xfs_bmap_map_extent(tp, tip, &irec);
1547
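                            /*
                             * xfs_defer_finish() may roll the transaction to
                             * finish the deferred work, so reload our local
                             * tp pointer from *tpp afterwards.
                             */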
1548                        error = xfs_defer_finish(tpp);
1549                        tp = *tpp;
1550                        if (error)
1551                                goto out;
1552
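                            /* Advance to the next piece of this donor mapping. */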
1553                        tirec.br_startoff += rlen;
1554                        if (tirec.br_startblock != HOLESTARTBLOCK &&
1555                            tirec.br_startblock != DELAYSTARTBLOCK)
1556                                tirec.br_startblock += rlen;
1557                        tirec.br_blockcount -= rlen;
1558                }
1559
1560                /* Roll on... */
1561                count_fsb -= ilen;
1562                offset_fsb += ilen;
1563        }
1564
1565        tip->i_d.di_flags2 = tip_flags2;
1566        return 0;
1567
1568out:
1569        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1570        tip->i_d.di_flags2 = tip_flags2;
1571        return error;
1572}
1573
1574/* Swap the extents of two files by swapping data forks. */
1575STATIC int
1576xfs_swap_extent_forks(
1577        struct xfs_trans        *tp,
1578        struct xfs_inode        *ip,
1579        struct xfs_inode        *tip,
1580        int                     *src_log_flags,
1581        int                     *target_log_flags)
1582{
1583        xfs_filblks_t           aforkblks = 0;
1584        xfs_filblks_t           taforkblks = 0;
1585        xfs_extnum_t            junk;
1586        uint64_t                tmp;
1587        int                     error;
1588
1589        /*
1590         * Count the number of extended attribute blocks
1591         */
1592        if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
1593            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1594                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1595                                &aforkblks);
1596                if (error)
1597                        return error;
1598        }
1599        if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
1600            tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1601                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1602                                &taforkblks);
1603                if (error)
1604                        return error;
1605        }
1606
1607        /*
1608         * Btree format (v3) inodes have the inode number stamped in the bmbt
1609         * block headers. We can't start changing the bmbt blocks until the
1610         * inode owner change is logged so recovery does the right thing in the
1611         * event of a crash. Set the owner change log flags now, on the new
1612         * owner of each fork, and leave the bmbt scan as the last step.
1613         */
1614        if (ip->i_d.di_version == 3 &&
1615            ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1616                (*target_log_flags) |= XFS_ILOG_DOWNER;
1617        if (tip->i_d.di_version == 3 &&
1618            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1619                (*src_log_flags) |= XFS_ILOG_DOWNER;
1620
1621        /*
1622         * Swap the data forks of the inodes
1623         */
1624        swap(ip->i_df, tip->i_df);
1625
1626        /*
1627         * Fix the on-disk inode values. The attr forks are not swapped, so
             * each inode keeps its own attr fork block count in the exchange.
1628         */
1629        tmp = (uint64_t)ip->i_d.di_nblocks;
1630        ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1631        tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1632
1633        swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
1634        swap(ip->i_d.di_format, tip->i_d.di_format);
1635
1636        /*
1637         * The extents in the source inode could still contain speculative
1638         * preallocation beyond EOF (e.g. the file is open but not modified
1639         * while defrag is in progress). In that case, we need to copy over the
1640         * number of delalloc blocks the data fork in the source inode is
1641         * tracking beyond EOF so that, when the fork is truncated away as the
1642         * temporary inode is unlinked, we don't underrun the i_delayed_blks
1643         * counter on that inode.
1644         */
1645        ASSERT(tip->i_delayed_blks == 0);
1646        tip->i_delayed_blks = ip->i_delayed_blks;
1647        ip->i_delayed_blks = 0;
1648
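            /*
             * Set the data fork log flags that match each inode's new fork
             * format so the swapped forks are logged correctly.
             */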
1649        switch (ip->i_d.di_format) {
1650        case XFS_DINODE_FMT_EXTENTS:
1651                (*src_log_flags) |= XFS_ILOG_DEXT;
1652                break;
1653        case XFS_DINODE_FMT_BTREE:
1654                ASSERT(ip->i_d.di_version < 3 ||
1655                       (*src_log_flags & XFS_ILOG_DOWNER));
1656                (*src_log_flags) |= XFS_ILOG_DBROOT;
1657                break;
1658        }
1659
1660        switch (tip->i_d.di_format) {
1661        case XFS_DINODE_FMT_EXTENTS:
1662                (*target_log_flags) |= XFS_ILOG_DEXT;
1663                break;
1664        case XFS_DINODE_FMT_BTREE:
1665                (*target_log_flags) |= XFS_ILOG_DBROOT;
1666                ASSERT(tip->i_d.di_version < 3 ||
1667                       (*target_log_flags & XFS_ILOG_DOWNER));
1668                break;
1669        }
1670
1671        return 0;
1672}
1673
1674/*
1675 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1676 * change owner scan attempts to order all modified buffers in the current
1677 * transaction. In the event of ordered buffer failure, the offending buffer is
1678 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1679 * the transaction in this case to replenish the fallback log reservation and
1680 * restart the scan. This process repeats until the scan completes.
1681 */
1682static int
1683xfs_swap_change_owner(
1684        struct xfs_trans        **tpp,
1685        struct xfs_inode        *ip,
1686        struct xfs_inode        *tmpip)
1687{
1688        int                     error;
1689        struct xfs_trans        *tp = *tpp;
1690
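            /* Retry the owner change scan until it completes without -EAGAIN. */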
1691        do {
1692                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1693                                              NULL);
1694                /* success or fatal error */
1695                if (error != -EAGAIN)
1696                        break;
1697
1698                error = xfs_trans_roll(tpp);
1699                if (error)
1700                        break;
1701                tp = *tpp;
1702
1703                /*
1704                 * Redirty both inodes so they can relog and keep the log tail
1705                 * moving forward.
1706                 */
1707                xfs_trans_ijoin(tp, ip, 0);
1708                xfs_trans_ijoin(tp, tmpip, 0);
1709                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1710                xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1711        } while (true);
1712
1713        return error;
1714}
1715
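    /*
     * A rough userspace sketch (not part of this file) of how xfs_fsr
     * drives this function; error handling is elided and target_fd,
     * tmp_fd, target_size and bstat are placeholders.  bstat would come
     * from XFS_IOC_FSBULKSTAT_SINGLE on the target before the copy:
     *
     *	struct xfs_swapext sx = {
     *		.sx_version  = XFS_SX_VERSION,
     *		.sx_fdtarget = target_fd,
     *		.sx_fdtmp    = tmp_fd,
     *		.sx_offset   = 0,
     *		.sx_length   = target_size,
     *		.sx_stat     = bstat,
     *	};
     *	ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
     */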
1716int
1717xfs_swap_extents(
1718        struct xfs_inode        *ip,    /* target inode */
1719        struct xfs_inode        *tip,   /* tmp inode */
1720        struct xfs_swapext      *sxp)
1721{
1722        struct xfs_mount        *mp = ip->i_mount;
1723        struct xfs_trans        *tp;
1724        struct xfs_bstat        *sbp = &sxp->sx_stat;
1725        int                     src_log_flags, target_log_flags;
1726        int                     error = 0;
1727        int                     lock_flags;
1728        uint64_t                f;
1729        int                     resblks = 0;
1730
1731        /*
1732         * Lock the inodes against other IO, page faults and truncate to
1733         * begin with.  Then we can safely flush the inodes and ensure they
1734         * hold no cached pages. Once we have done this we can take the
1735         * ilocks and do the rest of the checks.
1736         */
1737        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1738        lock_flags = XFS_MMAPLOCK_EXCL;
1739        xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1740
1741        /* Verify that both files have the same file type */
1742        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1743                error = -EINVAL;
1744                goto out_unlock;
1745        }
1746
1747        /* Verify both files are either real-time or non-realtime */
1748        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1749                error = -EINVAL;
1750                goto out_unlock;
1751        }
1752
1753        error = xfs_swap_extent_flush(ip);
1754        if (error)
1755                goto out_unlock;
1756        error = xfs_swap_extent_flush(tip);
1757        if (error)
1758                goto out_unlock;
1759
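            /*
             * Toss any leftover COW fork extents (e.g. speculative COW
             * preallocations) on the donor file before we swap forks.
             */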
1760        if (xfs_inode_has_cow_data(tip)) {
1761                error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1762                if (error)
1763                        goto out_unlock;
1764        }
1765
1766        /*
1767         * Extent "swapping" with rmap requires a permanent reservation and
1768         * a block reservation because it's really just a remap operation
1769         * performed with log redo items!
1770         */
1771        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1772                int             w       = XFS_DATA_FORK;
1773                uint32_t        ipnext  = XFS_IFORK_NEXTENTS(ip, w);
1774                uint32_t        tipnext = XFS_IFORK_NEXTENTS(tip, w);
1775
1776                /*
1777                 * Conceptually this shouldn't affect the shape of either bmbt,
1778                 * but since we atomically move extents one by one, we reserve
1779                 * enough space to rebuild both trees.
1780                 */
1781                resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1782                resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1783
1784                /*
1785                 * Handle the corner case where either inode might straddle the
1786                 * btree format boundary. If so, the inode could bounce between
1787                 * btree <-> extent format on unmap -> remap cycles, freeing and
1788                 * allocating a bmapbt block each time.
1789                 */
1790                if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1791                        resblks += XFS_IFORK_MAXEXT(ip, w);
1792                if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1793                        resblks += XFS_IFORK_MAXEXT(tip, w);
1794        }
1795        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1796        if (error)
1797                goto out_unlock;
1798
1799        /*
1800         * Lock and join the inodes to the transaction so that transaction commit
1801         * or cancel will unlock the inodes from this point onwards.
1802         */
1803        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1804        lock_flags |= XFS_ILOCK_EXCL;
1805        xfs_trans_ijoin(tp, ip, 0);
1806        xfs_trans_ijoin(tp, tip, 0);
1807
1809        /* Verify all data are being swapped */
1810        if (sxp->sx_offset != 0 ||
1811            sxp->sx_length != ip->i_d.di_size ||
1812            sxp->sx_length != tip->i_d.di_size) {
1813                error = -EFAULT;
1814                goto out_trans_cancel;
1815        }
1816
1817        trace_xfs_swap_extent_before(ip, 0);
1818        trace_xfs_swap_extent_before(tip, 1);
1819
1820        /* check inode formats now that data is flushed */
1821        error = xfs_swap_extents_check_format(ip, tip);
1822        if (error) {
1823                xfs_notice(mp,
1824                    "%s: inode 0x%llx format is incompatible for exchanging.",
1825                                __func__, ip->i_ino);
1826                goto out_trans_cancel;
1827        }
1828
1829        /*
1830         * Compare the current change & modify times with those
1831         * passed in.  If they differ, we abort this swap.
1832         * This is the mechanism used to assure the calling
1833         * process that the file was not changed out from
1834         * under it.
1835         */
1836        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1837            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1838            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1839            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1840                error = -EBUSY;
1841                goto out_trans_cancel;
1842        }
1843
1844        /*
1845         * Note the trickiness in setting the log flags - we set the owner log
1846         * flag on the opposite inode (i.e. the inode we are setting the new
1847         * owner to be) because once we swap the forks and log that, log
1848         * recovery is going to see the fork as owned by the swapped inode,
1849         * not the pre-swapped inodes.
1850         */
1851        src_log_flags = XFS_ILOG_CORE;
1852        target_log_flags = XFS_ILOG_CORE;
1853
1854        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1855                error = xfs_swap_extent_rmap(&tp, ip, tip);
1856        else
1857                error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1858                                &target_log_flags);
1859        if (error)
1860                goto out_trans_cancel;
1861
1862        /* Do we have to swap reflink flags? */
1863        if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1864            (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1865                f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1866                ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1867                ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1868                tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1869                tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1870        }
1871
1872        /* Swap the cow forks. */
1873        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1874                ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1875                ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1876
1877                swap(ip->i_cnextents, tip->i_cnextents);
1878                swap(ip->i_cowfp, tip->i_cowfp);
1879
1880                if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1881                        xfs_inode_set_cowblocks_tag(ip);
1882                else
1883                        xfs_inode_clear_cowblocks_tag(ip);
1884                if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1885                        xfs_inode_set_cowblocks_tag(tip);
1886                else
1887                        xfs_inode_clear_cowblocks_tag(tip);
1888        }
1889
1890        xfs_trans_log_inode(tp, ip,  src_log_flags);
1891        xfs_trans_log_inode(tp, tip, target_log_flags);
1892
1893        /*
1894         * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1895         * have inode number owner values in the bmbt blocks that still refer to
1896         * the old inode. Scan each bmbt to fix up the owner values with the
1897         * inode number of the current inode.
1898         */
1899        if (src_log_flags & XFS_ILOG_DOWNER) {
1900                error = xfs_swap_change_owner(&tp, ip, tip);
1901                if (error)
1902                        goto out_trans_cancel;
1903        }
1904        if (target_log_flags & XFS_ILOG_DOWNER) {
1905                error = xfs_swap_change_owner(&tp, tip, ip);
1906                if (error)
1907                        goto out_trans_cancel;
1908        }
1909
1910        /*
1911         * If this is a synchronous mount, make sure that the
1912         * transaction goes to disk before returning to the user.
1913         */
1914        if (mp->m_flags & XFS_MOUNT_WSYNC)
1915                xfs_trans_set_sync(tp);
1916
1917        error = xfs_trans_commit(tp);
1918
1919        trace_xfs_swap_extent_after(ip, 0);
1920        trace_xfs_swap_extent_after(tip, 1);
1921
1922out_unlock:
1923        xfs_iunlock(ip, lock_flags);
1924        xfs_iunlock(tip, lock_flags);
1925        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1926        return error;
1927
1928out_trans_cancel:
1929        xfs_trans_cancel(tp);
1930        goto out_unlock;
1931}
1932