linux/fs/xfs/xfs_bmap_util.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        return XFS_IS_REALTIME_INODE(ip) ?
                (xfs_daddr_t)XFS_FSB_TO_BB(ip->i_mount, fsb) :
                XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode *ip,
        xfs_fsblock_t   start_fsb,
        xfs_off_t       count_fsb)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_daddr_t     sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t        block = XFS_BB_TO_FSBT(mp, sector);

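        /* blkdev_issue_zeroout() works in 512-byte sectors, hence the shifts */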
        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    mod = 0;        /* offset/length alignment remainder */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
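        /* prod is the extent size hint expressed in realtime extents */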
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        div_u64_rem(ap->offset, align, &mod);
        if (mod || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
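        /* The rt allocator works in rtextent units; convert blkno first. */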
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, ap->wasdel, prod, &rtb);
        if (error)
                return error;

        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

                /* Zero the extent if we were asked to do so */
                if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
                        if (error)
                                return error;
                }
        } else {
                ap->length = 0;
        }
        return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
        struct xfs_ifork        *ifp,
        xfs_filblks_t           *count)
{
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    got;
        xfs_extnum_t            numrecs = 0;

        for_each_xfs_iext(ifp, &icur, &got) {
                if (!isnullstartblock(got.br_startblock)) {
                        *count += got.br_blockcount;
                        numrecs++;
                }
        }

        return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        xfs_filblks_t           *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

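        /* On-disk btree records are 1-based, so b runs from 1 to numrecs. */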
        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_ifork        *ifp,
        xfs_fsblock_t           blockno,
        int                     levelin,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        int                     error;
        struct xfs_buf          *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
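        /* The btree block itself occupies an fsblock; count it too. */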
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
                                count);
                if (error) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        (*nextents) += numrecs;
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        struct xfs_mount        *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */
        struct xfs_btree_block  *block; /* current btree block */
        struct xfs_ifork        *ifp;   /* fork structure */
        xfs_fsblock_t           bno;    /* block # of "block" */
        int                     level;  /* btree level, for checking */
        int                     error;

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        *nextents = 0;
        *count = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (!ifp)
                return 0;

        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_EXTENTS:
                *nextents = xfs_bmap_count_leaves(ifp, count);
                return 0;
        case XFS_DINODE_FMT_BTREE:
                if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                        error = xfs_iread_extents(tp, ip, whichfork);
                        if (error)
                                return error;
                }

                /*
                 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
                 */
                block = ifp->if_broot;
                level = be16_to_cpu(block->bb_level);
                ASSERT(level > 0);
                pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
                bno = be64_to_cpu(*pp);
                ASSERT(bno != NULLFSBLOCK);
                ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
                ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

                error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
                                nextents, count);
                if (error) {
                        XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
                                        XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                return 0;
        }

        return 0;
}

static int
xfs_getbmap_report_one(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        struct xfs_bmbt_irec    *got)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;
        bool                    shared = false, trimmed = false;
        int                     error;

        error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
        if (error)
                return error;

        if (isnullstartblock(got->br_startblock) ||
            got->br_startblock == DELAYSTARTBLOCK) {
                /*
                 * Delalloc extents that start beyond EOF can occur due to
                 * speculative EOF allocation when the delalloc extent is larger
                 * than the largest freespace extent at conversion time.  These
                 * extents cannot be converted by data writeback, so can exist
                 * here even if we are not supposed to be finding delalloc
                 * extents.
                 */
                if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
                        ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

                p->bmv_oflags |= BMV_OF_DELALLOC;
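                /* by convention, delalloc extents report a block of -2 */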
                p->bmv_block = -2;
        } else {
                p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
        }

        if (got->br_state == XFS_EXT_UNWRITTEN &&
            (bmv->bmv_iflags & BMV_IF_PREALLOC))
                p->bmv_oflags |= BMV_OF_PREALLOC;

        if (shared)
                p->bmv_oflags |= BMV_OF_SHARED;

        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
        return 0;
}

static void
xfs_getbmap_report_hole(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        xfs_fileoff_t           bno,
        xfs_fileoff_t           end)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;

        if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
                return;

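        /* holes are reported with a block value of -1 */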
        p->bmv_block = -1;
        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
}

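/*
 * The userspace bmv_count includes the header getbmapx structure, so the
 * output array only has bmv_count - 1 usable slots.
 */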
static inline bool
xfs_getbmap_full(
        struct getbmapx         *bmv)
{
        return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

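/*
 * Advance the record to the portion of the original extent that has not yet
 * been reported, returning false once the whole extent has been reported.
 */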
static bool
xfs_getbmap_next_rec(
        struct xfs_bmbt_irec    *rec,
        xfs_fileoff_t           total_end)
{
        xfs_fileoff_t           end = rec->br_startoff + rec->br_blockcount;

        if (end == total_end)
                return false;

        rec->br_startoff += rec->br_blockcount;
        if (!isnullstartblock(rec->br_startblock) &&
            rec->br_startblock != DELAYSTARTBLOCK)
                rec->br_startblock += rec->br_blockcount;
        rec->br_blockcount = total_end - end;
        return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.  Fills in
 * the out array until all extents in the range are mapped or until the
 * passed-in bmv->bmv_count slots have been filled.
 */
int                                             /* error code */
xfs_getbmap(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        struct kgetbmap         *out)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     iflags = bmv->bmv_iflags;
        int                     whichfork, lock, error = 0;
        int64_t                 bmv_end, max_len;
        xfs_fileoff_t           bno, first_bno;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_irec    got, rec;
        xfs_filblks_t           len;
        struct xfs_iext_cursor  icur;

        if (bmv->bmv_iflags & ~BMV_IF_VALID)
                return -EINVAL;
#ifndef DEBUG
        /* Only allow CoW fork queries if we're debugging. */
        if (iflags & BMV_IF_COWFORK)
                return -EINVAL;
#endif
        if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
                return -EINVAL;

        if (bmv->bmv_length < -1)
                return -EINVAL;
        bmv->bmv_entries = 0;
        if (bmv->bmv_length == 0)
                return 0;

        if (iflags & BMV_IF_ATTRFORK)
                whichfork = XFS_ATTR_FORK;
        else if (iflags & BMV_IF_COWFORK)
                whichfork = XFS_COW_FORK;
        else
                whichfork = XFS_DATA_FORK;
        ifp = XFS_IFORK_PTR(ip, whichfork);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        switch (whichfork) {
        case XFS_ATTR_FORK:
                if (!XFS_IFORK_Q(ip))
                        goto out_unlock_iolock;

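                /* there is no file size to bound an attr fork query */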
                max_len = 1LL << 32;
                lock = xfs_ilock_attr_map_shared(ip);
                break;
        case XFS_COW_FORK:
                /* No CoW fork? Just return */
                if (!ifp)
                        goto out_unlock_iolock;

                if (xfs_get_cowextsz_hint(ip))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = XFS_ILOCK_SHARED;
                xfs_ilock(ip, lock);
                break;
        case XFS_DATA_FORK:
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                if (xfs_get_extsz_hint(ip) ||
                    (ip->i_d.di_flags &
                     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = xfs_ilock_data_map_shared(ip);
                break;
        }

        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                break;
        case XFS_DINODE_FMT_LOCAL:
                /* Local format inode forks report no extents. */
                goto out_unlock_ilock;
        default:
                error = -EINVAL;
                goto out_unlock_ilock;
        }

        if (bmv->bmv_length == -1) {
                max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
                bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
        }

        bmv_end = bmv->bmv_offset + bmv->bmv_length;

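        /* bmv_offset and bmv_length are in 512-byte basic blocks (BBs) */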
        first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
        len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, whichfork);
                if (error)
                        goto out_unlock_ilock;
        }

        if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
                /*
                 * Report a whole-file hole if the delalloc flag is set to
                 * stay compatible with the old implementation.
                 */
                if (iflags & BMV_IF_DELALLOC)
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                goto out_unlock_ilock;
        }

        while (!xfs_getbmap_full(bmv)) {
                xfs_trim_extent(&got, first_bno, len);

                /*
                 * Report an entry for a hole if this extent doesn't directly
                 * follow the previous one.
                 */
                if (got.br_startoff > bno) {
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        got.br_startoff);
                        if (xfs_getbmap_full(bmv))
                                break;
                }

                /*
                 * In order to report shared extents accurately, we report each
                 * distinct shared / unshared part of a single bmbt record with
                 * an individual getbmapx record.
                 */
                bno = got.br_startoff + got.br_blockcount;
                rec = got;
                do {
                        error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
                                        &rec);
                        if (error || xfs_getbmap_full(bmv))
                                goto out_unlock_ilock;
                } while (xfs_getbmap_next_rec(&rec, bno));

                if (!xfs_iext_next_extent(ifp, &icur, &got)) {
                        xfs_fileoff_t   end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

                        out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

                        if (whichfork != XFS_ATTR_FORK && bno < end &&
                            !xfs_getbmap_full(bmv)) {
                                xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
                                                bno, end);
                        }
                        break;
                }

                if (bno >= first_bno + len)
                        break;
        }

out_unlock_ilock:
        xfs_iunlock(ip, lock);
out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        struct xfs_ifork        *ifp = &ip->i_df;
        xfs_fileoff_t           end_fsb = start_fsb + length;
        struct xfs_bmbt_irec    got, del;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        ASSERT(ifp->if_flags & XFS_IFEXTENTS);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;

        while (got.br_startoff + got.br_blockcount > start_fsb) {
                del = got;
                xfs_trim_extent(&del, start_fsb, length);

                /*
                 * A delete can push the cursor forward. Step back to the
                 * previous extent on non-delalloc or extents outside the
                 * target range.
                 */
                if (!del.br_blockcount ||
                    !isnullstartblock(del.br_startblock)) {
                        if (!xfs_iext_prev_extent(ifp, &icur, &got))
                                break;
                        continue;
                }

                error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
                                                  &got, &del);
                if (error || !xfs_iext_get_extent(ifp, &icur, &got))
                        break;
        }

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           end_fsb;
        xfs_fileoff_t           last_fsb;
        xfs_filblks_t           map_len;
        int                     nimaps;
        struct xfs_bmbt_irec    imap;
        struct xfs_mount        *mp = ip->i_mount;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /*
         * If there are blocks after the end of file, truncate the file to its
         * current size to free them up.
         */
        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip);
                if (error)
                        return error;

                /* wait on dio to ensure i_size has settled */
                inode_dio_wait(VFS_I(ip));

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size.  If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (i.e. the NULL files bug).
                 */
                error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
                                        XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp);
                } else {
                        error = xfs_trans_commit(tp);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        uint                    qblocks, resblks, resrtextents;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        div_u64_rem(startoffset_fsb, extsz, &temp);
                        if (temp)
                                e += temp;
                        div_u64_rem(e, extsz, &temp);
                        if (temp)
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
                                resrtextents, 0, &tp);

                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, resblks,
                                        imapp, &nimaps);
                if (error)
                        goto error0;

                /*
                 * Complete the transaction
                 */
                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                /* only read the mapping once we know one was returned */
                allocated_fsb = imapp->br_blockcount;
                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* unlock inode, unreserve quota blocks, cancel trans */
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        if (error) {
                ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
                        ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

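        /* unmap at most two extents (nexts == 2) per transaction */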
        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
        if (error)
                goto out_trans_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *startoffset_fsb,
        xfs_fileoff_t           *endoffset_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        int                     nimap, error;
        xfs_extlen_t            mod = 0;

        nimap = 1;
        error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
                if (mod)
                        *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
        }

        nimap = 1;
        error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                /*
                 * Compute how far into its rtextent the last block reaches
                 * and trim that much off the end unless the range already
                 * ends on an rtextent boundary.
                 */
                div_u64_rem(imap.br_startblock + 1, mp->m_sb.sb_rextsize, &mod);
                if (mod)
                        *endoffset_fsb -= mod;
        }

        return 0;
}

static int
xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        xfs_off_t               rounding, start, end;
        int                     error;

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(inode);

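        /* round to the larger of the fs block size and the page size */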
        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        start = round_down(offset, rounding);
        end = round_up(offset + len, rounding) - 1;

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;
        truncate_pagecache_range(inode, start, end);
        return 0;
}

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        error = xfs_flush_unmap_range(ip, offset, len);
        if (error)
                return error;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /*
         * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
         * and we can't use unwritten extents then we actually need to ensure
         * to zero the whole extent, otherwise we just need to take care of the
         * block boundaries, and xfs_bunmapi will handle the rest.
         */
        if (XFS_IS_REALTIME_INODE(ip) &&
            !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
                error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
                                &endoffset_fsb);
                if (error)
                        return error;
        }

        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  iomap_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
        if (offset >= XFS_ISIZE(ip))
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
        error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
        if (error)
                return error;

        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                (offset + len) & ~PAGE_MASK, LLONG_MAX);
        }

        return error;
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    blksize;
        int                     error;

        trace_xfs_zero_file_space(ip);

        blksize = 1 << mp->m_sb.sb_blocklog;

        /*
         * Punch a hole and prealloc the range. We use hole punch rather than
         * unwritten extent conversion for two reasons:
         *
         * 1.) Hole punch handles partial block zeroing for us.
         *
         * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
         * by virtue of the hole punch.
         */
        error = xfs_free_file_space(ip, offset, len);
        if (error)
                goto out;

        error = xfs_alloc_file_space(ip, round_down(offset, blksize),
                                     round_up(offset + len, blksize) -
                                     round_down(offset, blksize),
                                     XFS_BMAPI_PREALLOC);
out:
        return error;
}

static int
xfs_prepare_shift(
        struct xfs_inode        *ip,
        loff_t                  offset)
{
        int                     error;

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(ip);
                if (error)
                        return error;
        }

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
        if (error)
                return error;
        error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
                                        offset >> PAGE_SHIFT, -1);
        if (error)
                return error;

        /*
         * Clean out anything hanging around in the cow fork now that
         * we've flushed all the dirty data out to disk to avoid having
         * CoW extents at the wrong offsets.
         */
        if (xfs_inode_has_cow_data(ip)) {
                error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
                                true);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * xfs_collapse_file_space()
 *      This routine frees disk space and shifts extents for the given file.
 *      The first thing we do is free data blocks in the specified range
 *      by calling xfs_free_file_space().  This also syncs dirty data and
 *      invalidates the page cache over the region on which the collapse
 *      range is working.  Extent records are then shifted to the left to
 *      cover the hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_collapse_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           next_fsb = XFS_B_TO_FSB(mp, offset + len);
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_collapse_file_space(ip);

        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        while (!error && !done) {
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
                                        &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot, resblks, 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_trans_cancel;
                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

                error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
                                &done);
                if (error)
                        goto out_trans_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * xfs_insert_file_space()
 *      This routine creates hole space by shifting extents for the given file.
 *      The first thing we do is sync dirty data and invalidate the page cache
 *      over the region on which the insert range is working.  We then split
 *      an extent in two at the given offset by calling xfs_bmap_split_extent,
 *      and shift every extent record lying between [offset, last allocated
 *      extent] to the right to make room for the hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_insert_file_space(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           stop_fsb = XFS_B_TO_FSB(mp, offset);
        xfs_fileoff_t           next_fsb = NULLFSBLOCK;
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_insert_file_space(ip);

        error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        /*
         * The extent shifting code works on extent granularity. So, if stop_fsb
         * is not the starting block of extent, we need to split the extent at
         * stop_fsb.
         */
        error = xfs_bmap_split_extent(ip, stop_fsb);
        if (error)
                return error;

        while (!error && !done) {
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
                                        &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
                error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb);
                if (error)
                        goto out_trans_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
1429static int
1430xfs_swap_extents_check_format(
1431        struct xfs_inode        *ip,    /* target inode */
1432        struct xfs_inode        *tip)   /* tmp inode */
1433{
1434
1435        /* Should never get a local format */
1436        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1437            tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1438                return -EINVAL;
1439
1440        /*
1441         * if the target inode has less extents that then temporary inode then
1442         * why did userspace call us?
1443         */
1444        if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1445                return -EINVAL;
1446
1447        /*
1448         * If we have to use the (expensive) rmap swap method, we can
1449         * handle any number of extents and any format.
1450         */
1451        if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1452                return 0;
1453
1454        /*
1455         * If the target inode is in extent format and the temp inode is in
1456         * btree format, then we will end up with the target inode in the wrong
1457         * format, as we already know there are fewer extents in the temp inode.
1458         */
1459        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1460            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1461                return -EINVAL;
1462
1463        /* Check temp in extent form to max in target */
1464        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1465            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1466                        XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1467                return -EINVAL;
1468
1469        /* Check target in extent form to max in temp */
1470        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1471            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1472                        XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1473                return -EINVAL;
1474
1475        /*
1476         * If we are in a btree format, check that the temp root block will fit
1477         * in the target and that it has enough extents to be in btree format
1478         * in the target.
1479         *
1480         * Note that we have to be careful to allow btree->extent conversions
1481         * (a common defrag case) which will occur when the temp inode is in
1482         * extent format...
1483         */
1484        if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1485                if (XFS_IFORK_Q(ip) &&
1486                    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1487                        return -EINVAL;
1488                if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1489                    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1490                        return -EINVAL;
1491        }
1492
1493        /* Reciprocal target->temp btree format checks */
1494        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1495                if (XFS_IFORK_Q(tip) &&
1496                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1497                        return -EINVAL;
1498                if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1499                    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1500                        return -EINVAL;
1501        }
1502
1503        return 0;
1504}
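
/*
 * Worked illustration of the checks above (hypothetical geometry, not
 * exact on-disk math): bmbt records are 16 bytes, so a target inode
 * with 112 bytes of inline data fork space has XFS_IFORK_MAXEXT() == 7,
 * while a temp inode carrying a larger attr fork may only have room
 * for 6.  Defragmenting a 7-extent file therefore leaves the temp
 * inode's data fork in btree format; swapping that fork into the
 * target would strand it in a format it cannot hold, hence -EINVAL.
 */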
1505
1506static int
1507xfs_swap_extent_flush(
1508        struct xfs_inode        *ip)
1509{
1510        int     error;
1511
1512        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1513        if (error)
1514                return error;
1515        truncate_pagecache_range(VFS_I(ip), 0, -1);
1516
1517        /* Fail if the page cache could not be fully invalidated */
1518        if (VFS_I(ip)->i_mapping->nrpages)
1519                return -EINVAL;
1520        return 0;
1521}
1522
1523/*
1524 * Move extents from one file to another, when rmap is enabled.
1525 */
1526STATIC int
1527xfs_swap_extent_rmap(
1528        struct xfs_trans                **tpp,
1529        struct xfs_inode                *ip,
1530        struct xfs_inode                *tip)
1531{
1532        struct xfs_trans                *tp = *tpp;
1533        struct xfs_bmbt_irec            irec;
1534        struct xfs_bmbt_irec            uirec;
1535        struct xfs_bmbt_irec            tirec;
1536        xfs_fileoff_t                   offset_fsb;
1537        xfs_fileoff_t                   end_fsb;
1538        xfs_filblks_t                   count_fsb;
1539        int                             error;
1540        xfs_filblks_t                   ilen;
1541        xfs_filblks_t                   rlen;
1542        int                             nimaps;
1543        uint64_t                        tip_flags2;
1544
1545        /*
1546         * If the source file has shared blocks, we must flag the donor
1547         * file as having shared blocks so that we get the shared-block
1548         * rmap functions when we go to fix up the rmaps.  The flags
1549         * will be switch for reals later.
1550         */
1551        tip_flags2 = tip->i_d.di_flags2;
1552        if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1553                tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1554
1555        offset_fsb = 0;
1556        end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1557        count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1558
1559        while (count_fsb) {
1560                /* Read extent from the donor file */
1561                nimaps = 1;
1562                error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1563                                &nimaps, 0);
1564                if (error)
1565                        goto out;
1566                ASSERT(nimaps == 1);
1567                ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1568
1569                trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1570                ilen = tirec.br_blockcount;
1571
1572                /* Unmap the old blocks in the source file. */
1573                while (tirec.br_blockcount) {
1574                        ASSERT(tp->t_firstblock == NULLFSBLOCK);
1575                        trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1576
1577                        /* Read extent from the source file */
1578                        nimaps = 1;
1579                        error = xfs_bmapi_read(ip, tirec.br_startoff,
1580                                        tirec.br_blockcount, &irec,
1581                                        &nimaps, 0);
1582                        if (error)
1583                                goto out;
1584                        ASSERT(nimaps == 1);
1585                        ASSERT(tirec.br_startoff == irec.br_startoff);
1586                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1587
1588                        /* Trim the extent. */
1589                        uirec = tirec;
1590                        uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1591                                        tirec.br_blockcount,
1592                                        irec.br_blockcount);
1593                        trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1594
1595                        /* Remove the mapping from the donor file. */
1596                        error = xfs_bmap_unmap_extent(tp, tip, &uirec);
1597                        if (error)
1598                                goto out;
1599
1600                        /* Remove the mapping from the source file. */
1601                        error = xfs_bmap_unmap_extent(tp, ip, &irec);
1602                        if (error)
1603                                goto out;
1604
1605                        /* Map the donor file's blocks into the source file. */
1606                        error = xfs_bmap_map_extent(tp, ip, &uirec);
1607                        if (error)
1608                                goto out;
1609
1610                        /* Map the source file's blocks into the donor file. */
1611                        error = xfs_bmap_map_extent(tp, tip, &irec);
1612                        if (error)
1613                                goto out;
1614
1615                        error = xfs_defer_finish(tpp);
1616                        tp = *tpp;
1617                        if (error)
1618                                goto out;
1619
1620                        tirec.br_startoff += rlen;
1621                        if (tirec.br_startblock != HOLESTARTBLOCK &&
1622                            tirec.br_startblock != DELAYSTARTBLOCK)
1623                                tirec.br_startblock += rlen;
1624                        tirec.br_blockcount -= rlen;
1625                }
1626
1627                /* Roll on... */
1628                count_fsb -= ilen;
1629                offset_fsb += ilen;
1630        }
1631
1632        tip->i_d.di_flags2 = tip_flags2;
1633        return 0;
1634
1635out:
1636        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1637        tip->i_d.di_flags2 = tip_flags2;
1638        return error;
1639}
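
/*
 * Worked example of the remap loop above (illustrative numbers): say
 * the donor maps blocks [0, 10) as a single extent while the source
 * maps the same range as two extents, [0, 4) and [4, 10).  The inner
 * loop first trims the donor extent to rlen = 4, unmaps and remaps
 * that piece in both files, finishes the deferred ops, then advances
 * tirec by four blocks; a second pass handles the remaining six.
 */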
1640
1641/* Swap the extents of two files by swapping data forks. */
1642STATIC int
1643xfs_swap_extent_forks(
1644        struct xfs_trans        *tp,
1645        struct xfs_inode        *ip,
1646        struct xfs_inode        *tip,
1647        int                     *src_log_flags,
1648        int                     *target_log_flags)
1649{
1650        xfs_filblks_t           aforkblks = 0;
1651        xfs_filblks_t           taforkblks = 0;
1652        xfs_extnum_t            junk;
1653        uint64_t                tmp;
1654        int                     error;
1655
1656        /*
1657         * Count the number of extended attribute blocks
1658         */
1659        if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
1660            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1661                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1662                                &aforkblks);
1663                if (error)
1664                        return error;
1665        }
1666        if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
1667            tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1668                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1669                                &taforkblks);
1670                if (error)
1671                        return error;
1672        }
1673
1674        /*
1675         * Btree format (v3) inodes have the inode number stamped in the bmbt
1676         * block headers. We can't start changing the bmbt blocks until the
1677         * inode owner change is logged so recovery does the right thing in the
1678         * event of a crash. Set the owner change log flags now and leave the
1679         * bmbt scan as the last step.
1680         */
1681        if (ip->i_d.di_version == 3 &&
1682            ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1683                (*target_log_flags) |= XFS_ILOG_DOWNER;
1684        if (tip->i_d.di_version == 3 &&
1685            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1686                (*src_log_flags) |= XFS_ILOG_DOWNER;
1687
1688        /*
1689         * Swap the data forks of the inodes
1690         */
1691        swap(ip->i_df, tip->i_df);
1692
1693        /*
1694         * Fix the on-disk inode values
1695         */
1696        tmp = (uint64_t)ip->i_d.di_nblocks;
1697        ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1698        tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1699
1700        swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
1701        swap(ip->i_d.di_format, tip->i_d.di_format);
1702
1703        /*
1704         * The extents in the source inode could still contain speculative
1705         * preallocation beyond EOF (e.g. the file is open but not modified
1706         * while defrag is in progress). In that case, we need to copy over the
1707         * number of delalloc blocks the data fork in the source inode is
1708         * tracking beyond EOF so that, when the fork is truncated away as the
1709         * temporary inode is unlinked, we don't underrun the i_delayed_blks
1710         * counter on that inode.
1711         */
1712        ASSERT(tip->i_delayed_blks == 0);
1713        tip->i_delayed_blks = ip->i_delayed_blks;
1714        ip->i_delayed_blks = 0;
1715
1716        switch (ip->i_d.di_format) {
1717        case XFS_DINODE_FMT_EXTENTS:
1718                (*src_log_flags) |= XFS_ILOG_DEXT;
1719                break;
1720        case XFS_DINODE_FMT_BTREE:
1721                ASSERT(ip->i_d.di_version < 3 ||
1722                       (*src_log_flags & XFS_ILOG_DOWNER));
1723                (*src_log_flags) |= XFS_ILOG_DBROOT;
1724                break;
1725        }
1726
1727        switch (tip->i_d.di_format) {
1728        case XFS_DINODE_FMT_EXTENTS:
1729                (*target_log_flags) |= XFS_ILOG_DEXT;
1730                break;
1731        case XFS_DINODE_FMT_BTREE:
1732                (*target_log_flags) |= XFS_ILOG_DBROOT;
1733                ASSERT(tip->i_d.di_version < 3 ||
1734                       (*target_log_flags & XFS_ILOG_DOWNER));
1735                break;
1736        }
1737
1738        return 0;
1739}
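
/*
 * Worked example of the di_nblocks fix-up above (illustrative numbers):
 * the attr forks are not swapped, so their block counts must stay with
 * their inodes.  If ip has di_nblocks = 100 with aforkblks = 10 and tip
 * has di_nblocks = 80 with taforkblks = 2, then after the data forks
 * trade places:
 *
 *	ip->i_d.di_nblocks  =  80 - 2 + 10 = 88	(tip data + ip attr)
 *	tip->i_d.di_nblocks = 100 + 2 - 10 = 92	(ip data + tip attr)
 *
 * The sum (180 blocks) is preserved; only the data fork counts move.
 */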
1740
1741/*
1742 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1743 * change owner scan attempts to order all modified buffers in the current
1744 * transaction. In the event of ordered buffer failure, the offending buffer is
1745 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1746 * the transaction in this case to replenish the fallback log reservation and
1747 * restart the scan. This process repeats until the scan completes.
1748 */
1749static int
1750xfs_swap_change_owner(
1751        struct xfs_trans        **tpp,
1752        struct xfs_inode        *ip,
1753        struct xfs_inode        *tmpip)
1754{
1755        int                     error;
1756        struct xfs_trans        *tp = *tpp;
1757
1758        do {
1759                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1760                                              NULL);
1761                /* success or fatal error */
1762                if (error != -EAGAIN)
1763                        break;
1764
1765                error = xfs_trans_roll(tpp);
1766                if (error)
1767                        break;
1768                tp = *tpp;
1769
1770                /*
1771                 * Redirty both inodes so they can relog and keep the log tail
1772                 * moving forward.
1773                 */
1774                xfs_trans_ijoin(tp, ip, 0);
1775                xfs_trans_ijoin(tp, tmpip, 0);
1776                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1777                xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1778        } while (true);
1779
1780        return error;
1781}
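
/*
 * Illustration of the retry protocol above: if the bmbt dirties more
 * buffers than the transaction can order in one go,
 * xfs_bmbt_change_owner() physically logs the overflowing buffer and
 * returns -EAGAIN; the roll commits what has been ordered so far,
 * replenishes the log reservation, and the scan restarts until it
 * completes cleanly.
 */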
1782
1783int
1784xfs_swap_extents(
1785        struct xfs_inode        *ip,    /* target inode */
1786        struct xfs_inode        *tip,   /* tmp inode */
1787        struct xfs_swapext      *sxp)
1788{
1789        struct xfs_mount        *mp = ip->i_mount;
1790        struct xfs_trans        *tp;
1791        struct xfs_bstat        *sbp = &sxp->sx_stat;
1792        int                     src_log_flags, target_log_flags;
1793        int                     error = 0;
1794        int                     lock_flags;
1795        uint64_t                f;
1796        int                     resblks = 0;
1797
1798        /*
1799         * Lock the inodes against other IO, page faults and truncate to
1800         * begin with.  Then we can safely flush the inodes and ensure they
1801         * have no page cache. Once we have done this we can take the ilocks and
1802         * do the rest of the checks.
1803         */
1804        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1805        lock_flags = XFS_MMAPLOCK_EXCL;
1806        xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1807
1808        /* Verify that both files have the same format */
1809        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1810                error = -EINVAL;
1811                goto out_unlock;
1812        }
1813
1814        /* Verify both files are either real-time or non-realtime */
1815        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1816                error = -EINVAL;
1817                goto out_unlock;
1818        }
1819
1820        error = xfs_swap_extent_flush(ip);
1821        if (error)
1822                goto out_unlock;
1823        error = xfs_swap_extent_flush(tip);
1824        if (error)
1825                goto out_unlock;
1826
1827        /*
1828         * Extent "swapping" with rmap requires a permanent reservation and
1829         * a block reservation because it's really just a remap operation
1830         * performed with log redo items!
1831         */
1832        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1833                int             w       = XFS_DATA_FORK;
1834                uint32_t        ipnext  = XFS_IFORK_NEXTENTS(ip, w);
1835                uint32_t        tipnext = XFS_IFORK_NEXTENTS(tip, w);
1836
1837                /*
1838                 * Conceptually this shouldn't affect the shape of either bmbt,
1839                 * but since we atomically move extents one by one, we reserve
1840                 * enough space to rebuild both trees.
1841                 */
1842                resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1843        resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1844
1845                /*
1846                 * Handle the corner case where either inode might straddle the
1847                 * btree format boundary. If so, the inode could bounce between
1848                 * btree <-> extent format on unmap -> remap cycles, freeing and
1849                 * allocating a bmapbt block each time.
1850                 */
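                /*
                 * e.g. with XFS_IFORK_MAXEXT() == 20 and ipnext == 21
                 * (purely illustrative numbers): every unmap may drop
                 * the fork back to extent format and every remap push
                 * it into btree format again, costing one bmbt block
                 * per cycle, so reserve enough to cover those blocks.
                 */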
1851                if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1852                        resblks += XFS_IFORK_MAXEXT(ip, w);
1853                if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1854                        resblks += XFS_IFORK_MAXEXT(tip, w);
1855        }
1856        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1857        if (error)
1858                goto out_unlock;
1859
1860        /*
1861         * Lock and join the inodes to the transaction so that transaction commit
1862         * or cancel will unlock the inodes from this point onwards.
1863         */
1864        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1865        lock_flags |= XFS_ILOCK_EXCL;
1866        xfs_trans_ijoin(tp, ip, 0);
1867        xfs_trans_ijoin(tp, tip, 0);
1868
1870        /* Verify all data are being swapped */
1871        if (sxp->sx_offset != 0 ||
1872            sxp->sx_length != ip->i_d.di_size ||
1873            sxp->sx_length != tip->i_d.di_size) {
1874                error = -EFAULT;
1875                goto out_trans_cancel;
1876        }
1877
1878        trace_xfs_swap_extent_before(ip, 0);
1879        trace_xfs_swap_extent_before(tip, 1);
1880
1881        /* check inode formats now that data is flushed */
1882        error = xfs_swap_extents_check_format(ip, tip);
1883        if (error) {
1884                xfs_notice(mp,
1885                    "%s: inode 0x%llx format is incompatible for exchanging.",
1886                                __func__, ip->i_ino);
1887                goto out_trans_cancel;
1888        }
1889
1890        /*
1891         * Compare the current change & modify times with those
1892         * passed in.  If they differ, we abort this swap.
1893         * This is the mechanism used to assure the calling
1894         * process that the file was not changed out from
1895         * under it.
1896         */
1897        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1898            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1899            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1900            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1901                error = -EBUSY;
1902                goto out_trans_cancel;
1903        }
1904
1905        /*
1906         * Note the trickiness in setting the log flags - we set the owner log
1907         * flag on the opposite inode (i.e. the inode we are setting the new
1908         * owner to be) because once we swap the forks and log that, log
1909         * recovery is going to see the fork as owned by the swapped inode,
1910         * not the pre-swapped inodes.
1911         */
1912        src_log_flags = XFS_ILOG_CORE;
1913        target_log_flags = XFS_ILOG_CORE;
1914
1915        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1916                error = xfs_swap_extent_rmap(&tp, ip, tip);
1917        else
1918                error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1919                                &target_log_flags);
1920        if (error)
1921                goto out_trans_cancel;
1922
1923        /* Do we have to swap reflink flags? */
1924        if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1925            (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1926                f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1927                ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1928                ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1929                tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1930                tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1931        }
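        /*
         * (Tracing the dance above: f saved ip's REFLINK bit, ip then
         * took tip's bit, and tip took f, so the two flags have traded
         * places.)
         */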
1932
1933        /* Swap the cow forks. */
1934        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1935                ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1936                ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1937
1938                swap(ip->i_cnextents, tip->i_cnextents);
1939                swap(ip->i_cowfp, tip->i_cowfp);
1940
1941                if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1942                        xfs_inode_set_cowblocks_tag(ip);
1943                else
1944                        xfs_inode_clear_cowblocks_tag(ip);
1945                if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1946                        xfs_inode_set_cowblocks_tag(tip);
1947                else
1948                        xfs_inode_clear_cowblocks_tag(tip);
1949        }
1950
1951        xfs_trans_log_inode(tp, ip,  src_log_flags);
1952        xfs_trans_log_inode(tp, tip, target_log_flags);
1953
1954        /*
1955         * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1956         * have inode number owner values in the bmbt blocks that still refer to
1957         * the old inode. Scan each bmbt to fix up the owner values with the
1958         * inode number of the current inode.
1959         */
1960        if (src_log_flags & XFS_ILOG_DOWNER) {
1961                error = xfs_swap_change_owner(&tp, ip, tip);
1962                if (error)
1963                        goto out_trans_cancel;
1964        }
1965        if (target_log_flags & XFS_ILOG_DOWNER) {
1966                error = xfs_swap_change_owner(&tp, tip, ip);
1967                if (error)
1968                        goto out_trans_cancel;
1969        }
1970
1971        /*
1972         * If this is a synchronous mount, make sure that the
1973         * transaction goes to disk before returning to the user.
1974         */
1975        if (mp->m_flags & XFS_MOUNT_WSYNC)
1976                xfs_trans_set_sync(tp);
1977
1978        error = xfs_trans_commit(tp);
1979
1980        trace_xfs_swap_extent_after(ip, 0);
1981        trace_xfs_swap_extent_after(tip, 1);
1982
1983out_unlock:
1984        xfs_iunlock(ip, lock_flags);
1985        xfs_iunlock(tip, lock_flags);
1986        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1987        return error;
1988
1989out_trans_cancel:
1990        xfs_trans_cancel(tp);
1991        goto out_unlock;
1992}
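
/*
 * How userspace drives xfs_swap_extents(): a minimal sketch of an
 * xfs_fsr-style caller, using the XFS_IOC_SWAPEXT uapi from xfs_fs.h.
 * Illustrative only; swap_into() is a hypothetical helper, and error
 * handling plus the construction of the defragmented temp file are
 * omitted.  Note that sx_stat is filled from a bulkstat of the target
 * taken before the swap, which is what arms the ctime/mtime comparison
 * above: if the target changes in the meantime, the ioctl fails with
 * -EBUSY.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <xfs/xfs.h>
 *
 *	static int swap_into(int target_fd, int tmp_fd, __u64 ino,
 *			     off_t size)
 *	{
 *		struct xfs_fsop_bulkreq	breq;
 *		struct xfs_swapext	sx;
 *		__s32			ocount;
 *
 *		memset(&sx, 0, sizeof(sx));
 *		breq.lastip = &ino;
 *		breq.icount = 1;
 *		breq.ubuffer = &sx.sx_stat;
 *		breq.ocount = &ocount;
 *		if (ioctl(target_fd, XFS_IOC_FSBULKSTAT_SINGLE, &breq) < 0)
 *			return -1;
 *
 *		sx.sx_version = XFS_SX_VERSION;
 *		sx.sx_fdtarget = target_fd;
 *		sx.sx_fdtmp = tmp_fd;
 *		sx.sx_offset = 0;
 *		sx.sx_length = size;	// must equal both file sizes
 *		return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
 *	}
 */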
1993