linux/fs/xfs/xfs_bmap_util.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        if (XFS_IS_REALTIME_INODE(ip))
                return XFS_FSB_TO_BB(ip->i_mount, fsb);
        return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode        *ip,
        xfs_fsblock_t           start_fsb,
        xfs_off_t               count_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
        xfs_daddr_t             sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t                block = XFS_BB_TO_FSBT(mp, sector);

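        /*
         * blkdev_issue_zeroout() takes 512-byte sectors, so convert the
         * filesystem block offset and count by shifting by
         * (s_blocksize_bits - 9).
         */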
        return blkdev_issue_zeroout(target->bt_bdev,
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)
{
        struct xfs_mount        *mp = ap->ip->i_mount;
        xfs_fileoff_t           orig_offset = ap->offset;
        xfs_rtblock_t           rtb;
        xfs_extlen_t            prod = 0;  /* product factor for allocators */
        xfs_extlen_t            mod = 0;   /* offset alignment remainder */
        xfs_extlen_t            ralen = 0; /* realtime allocation length */
        xfs_extlen_t            align;     /* minimum allocation alignment */
        xfs_extlen_t            orig_length = ap->length;
        xfs_extlen_t            minlen = mp->m_sb.sb_rextsize;
        xfs_extlen_t            raminlen;
        bool                    rtlocked = false;
        bool                    ignore_locality = false;
        int                     error;

        align = xfs_get_extsz_hint(ap->ip);
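        /*
         * We may come back here up to two times: once retrying without the
         * extent size hint alignment, and once more ignoring allocation
         * locality.
         */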
retry:
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If we shifted the file offset downward to satisfy an extent size
         * hint, increase minlen by that amount so that the allocator won't
         * give us an allocation that's too short to cover at least one of the
         * blocks that the caller asked for.
         */
        if (ap->offset != orig_offset)
                minlen += orig_offset - ap->offset;

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        div_u64_rem(ap->offset, align, &mod);
        if (mod || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
        if (!rtlocked) {
                xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
                xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
                xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
                xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
                rtlocked = true;
        }

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t rtx; /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

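        /* Nudge ap->blkno towards blocks adjacent to neighbouring extents. */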
        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        if (ignore_locality)
                ap->blkno = 0;
        else
                do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
        error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
                        &ralen, ap->wasdel, prod, &rtb);
        if (error)
                return error;

        if (rtb != NULLRTBLOCK) {
                ap->blkno = rtb * mp->m_sb.sb_rextsize;
                ap->length = ralen * mp->m_sb.sb_rextsize;
                ap->ip->i_nblocks += ap->length;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ap->length;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, ap->length);
                return 0;
        }

        if (align > mp->m_sb.sb_rextsize) {
                /*
                 * We previously enlarged the request length to try to satisfy
                 * an extent size hint.  The allocator didn't return anything,
                 * so reset the parameters to the original values and try again
                 * without alignment criteria.
                 */
                ap->offset = orig_offset;
                ap->length = orig_length;
                minlen = align = mp->m_sb.sb_rextsize;
                goto retry;
        }

        if (!ignore_locality && ap->blkno != 0) {
                /*
                 * If we can't allocate near a specific rt extent, try again
                 * without locality criteria.
                 */
                ignore_locality = true;
                goto retry;
        }

        ap->blkno = NULLFSBLOCK;
        ap->length = 0;
        return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
        struct xfs_ifork        *ifp,
        xfs_filblks_t           *count)
{
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    got;
        xfs_extnum_t            numrecs = 0;

        for_each_xfs_iext(ifp, &icur, &got) {
                if (!isnullstartblock(got.br_startblock)) {
                        *count += got.br_blockcount;
                        numrecs++;
                }
        }

        return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_btree_cur    *cur;
        xfs_extlen_t            btblocks = 0;
        int                     error;

        *nextents = 0;
        *count = 0;

        if (!ifp)
                return 0;

        switch (ifp->if_format) {
        case XFS_DINODE_FMT_BTREE:
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;

                cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
                error = xfs_btree_count_blocks(cur, &btblocks);
                xfs_btree_del_cursor(cur, error);
                if (error)
                        return error;

                /*
                 * xfs_btree_count_blocks includes the root block contained in
                 * the inode fork in @btblocks, so subtract one because we're
                 * only interested in allocated disk blocks.
                 */
                *count += btblocks - 1;

                fallthrough;
        case XFS_DINODE_FMT_EXTENTS:
                *nextents = xfs_bmap_count_leaves(ifp, count);
                break;
        }

        return 0;
}

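/*
 * Format one mapping record into a kgetbmap entry and advance the caller's
 * getbmapx request range past it.
 */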
static int
xfs_getbmap_report_one(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        struct xfs_bmbt_irec    *got)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;
        bool                    shared = false;
        int                     error;

        error = xfs_reflink_trim_around_shared(ip, got, &shared);
        if (error)
                return error;

        if (isnullstartblock(got->br_startblock) ||
            got->br_startblock == DELAYSTARTBLOCK) {
                /*
                 * Delalloc extents that start beyond EOF can occur due to
                 * speculative EOF allocation when the delalloc extent is larger
                 * than the largest freespace extent at conversion time.  These
                 * extents cannot be converted by data writeback, so can exist
                 * here even if we are not supposed to be finding delalloc
                 * extents.
                 */
                if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
                        ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

                p->bmv_oflags |= BMV_OF_DELALLOC;
                p->bmv_block = -2;
        } else {
                p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
        }

        if (got->br_state == XFS_EXT_UNWRITTEN &&
            (bmv->bmv_iflags & BMV_IF_PREALLOC))
                p->bmv_oflags |= BMV_OF_PREALLOC;

        if (shared)
                p->bmv_oflags |= BMV_OF_SHARED;

        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
        return 0;
}

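/* Emit a kgetbmap entry for a hole, unless the caller asked us not to. */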
static void
xfs_getbmap_report_hole(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        xfs_fileoff_t           bno,
        xfs_fileoff_t           end)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;

        if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
                return;

        p->bmv_block = -1;
        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
}

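/*
 * Have we filled the caller's output array?  We keep the final slot in
 * reserve so there is always room for one last record, hence bmv_count - 1.
 */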
static inline bool
xfs_getbmap_full(
        struct getbmapx         *bmv)
{
        return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

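/*
 * Advance @rec to the still unreported remainder of the current mapping so
 * that the shared/unshared split loop in xfs_getbmap() can report it piece
 * by piece.  Returns false once the whole mapping has been consumed.
 */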
static bool
xfs_getbmap_next_rec(
        struct xfs_bmbt_irec    *rec,
        xfs_fileoff_t           total_end)
{
        xfs_fileoff_t           end = rec->br_startoff + rec->br_blockcount;

        if (end == total_end)
                return false;

        rec->br_startoff += rec->br_blockcount;
        if (!isnullstartblock(rec->br_startblock) &&
            rec->br_startblock != DELAYSTARTBLOCK)
                rec->br_startblock += rec->br_blockcount;
        rec->br_blockcount = total_end - end;
        return true;
}

/*
 * Get the inode's extents as described in bmv, and format them for output
 * into the out array.  Entries are generated until all extents are mapped
 * or the passed-in bmv->bmv_count slots have been filled.
 */
int                                             /* error code */
xfs_getbmap(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        struct kgetbmap         *out)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     iflags = bmv->bmv_iflags;
        int                     whichfork, lock, error = 0;
        int64_t                 bmv_end, max_len;
        xfs_fileoff_t           bno, first_bno;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_irec    got, rec;
        xfs_filblks_t           len;
        struct xfs_iext_cursor  icur;

        if (bmv->bmv_iflags & ~BMV_IF_VALID)
                return -EINVAL;
#ifndef DEBUG
        /* Only allow CoW fork queries if we're debugging. */
        if (iflags & BMV_IF_COWFORK)
                return -EINVAL;
#endif
        if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
                return -EINVAL;

        if (bmv->bmv_length < -1)
                return -EINVAL;
        bmv->bmv_entries = 0;
        if (bmv->bmv_length == 0)
                return 0;

        if (iflags & BMV_IF_ATTRFORK)
                whichfork = XFS_ATTR_FORK;
        else if (iflags & BMV_IF_COWFORK)
                whichfork = XFS_COW_FORK;
        else
                whichfork = XFS_DATA_FORK;
        ifp = XFS_IFORK_PTR(ip, whichfork);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        switch (whichfork) {
        case XFS_ATTR_FORK:
                if (!XFS_IFORK_Q(ip))
                        goto out_unlock_iolock;

                max_len = 1LL << 32;
                lock = xfs_ilock_attr_map_shared(ip);
                break;
        case XFS_COW_FORK:
                /* No CoW fork? Just return */
                if (!ifp)
                        goto out_unlock_iolock;

                if (xfs_get_cowextsz_hint(ip))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = XFS_ILOCK_SHARED;
                xfs_ilock(ip, lock);
                break;
        case XFS_DATA_FORK:
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                if (xfs_get_extsz_hint(ip) ||
                    (ip->i_diflags &
                     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = xfs_ilock_data_map_shared(ip);
                break;
        }

        switch (ifp->if_format) {
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                break;
        case XFS_DINODE_FMT_LOCAL:
                /* Local format inode forks report no extents. */
                goto out_unlock_ilock;
        default:
                error = -EINVAL;
                goto out_unlock_ilock;
        }

        if (bmv->bmv_length == -1) {
                max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
                bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
        }

        bmv_end = bmv->bmv_offset + bmv->bmv_length;

        first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
        len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

        error = xfs_iread_extents(NULL, ip, whichfork);
        if (error)
                goto out_unlock_ilock;

        if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
                /*
                 * Report a whole-file hole if the delalloc flag is set to
                 * stay compatible with the old implementation.
                 */
                if (iflags & BMV_IF_DELALLOC)
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                goto out_unlock_ilock;
        }

        while (!xfs_getbmap_full(bmv)) {
                xfs_trim_extent(&got, first_bno, len);

                /*
                 * Report an entry for a hole if this extent doesn't directly
                 * follow the previous one.
                 */
                if (got.br_startoff > bno) {
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        got.br_startoff);
                        if (xfs_getbmap_full(bmv))
                                break;
                }

                /*
                 * In order to report shared extents accurately, we report each
                 * distinct shared / unshared part of a single bmbt record with
                 * an individual getbmapx record.
                 */
                bno = got.br_startoff + got.br_blockcount;
                rec = got;
                do {
                        error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
                                        &rec);
                        if (error || xfs_getbmap_full(bmv))
                                goto out_unlock_ilock;
                } while (xfs_getbmap_next_rec(&rec, bno));

                if (!xfs_iext_next_extent(ifp, &icur, &got)) {
                        xfs_fileoff_t   end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

                        out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

                        if (whichfork != XFS_ATTR_FORK && bno < end &&
                            !xfs_getbmap_full(bmv)) {
                                xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
                                                bno, end);
                        }
                        break;
                }

                if (bno >= first_bno + len)
                        break;
        }

out_unlock_ilock:
        xfs_iunlock(ip, lock);
out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        struct xfs_ifork        *ifp = &ip->i_df;
        xfs_fileoff_t           end_fsb = start_fsb + length;
        struct xfs_bmbt_irec    got, del;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        ASSERT(!xfs_need_iread_extents(ifp));

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;

        while (got.br_startoff + got.br_blockcount > start_fsb) {
                del = got;
                xfs_trim_extent(&del, start_fsb, length);

                /*
                 * A delete can push the cursor forward. Step back to the
                 * previous extent on non-delalloc or extents outside the
                 * target range.
                 */
                if (!del.br_blockcount ||
                    !isnullstartblock(del.br_startblock)) {
                        if (!xfs_iext_prev_extent(ifp, &icur, &got))
                                break;
                        continue;
                }

                error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
                                                  &got, &del);
                if (error || !xfs_iext_get_extent(ifp, &icur, &got))
                        break;
        }

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(
        struct xfs_inode        *ip,
        bool                    force)
{
        struct xfs_bmbt_irec    imap;
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           end_fsb;
        xfs_fileoff_t           last_fsb;
        int                     nimaps = 1;
        int                     error;

        /*
         * Caller must either hold the exclusive io lock or be inactivating
         * the inode, which guarantees there are no other users of the inode.
         */
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
               (VFS_I(ip)->i_state & I_FREEING));

        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and no delalloc blocks will
         * not have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (xfs_need_iread_extents(&ip->i_df))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        /*
         * Do not try to free post-EOF blocks if EOF is beyond the end of the
         * range supported by the page cache, because the truncation will loop
         * forever.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return false;

        /*
         * Look up the mapping for the first block past EOF.  If we can't find
         * it, there's nothing to free.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
                        0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        if (error || nimaps == 0)
                return false;

        /*
         * If there's a real mapping there or there are delayed allocation
         * reservations, then we have post-EOF blocks to try to free.
         */
        return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}


/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /* Attach the dquots to the inode up front. */
        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        /* Wait on dio to ensure i_size has settled. */
        inode_dio_wait(VFS_I(ip));

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
        if (error) {
                ASSERT(xfs_is_shutdown(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /*
         * Do not update the on-disk file size.  If we update the on-disk file
         * size and then the system crashes before the contents of the file are
         * flushed to disk then the files may be full of holes (i.e. the NULL
         * files bug).
         */
        error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
                                XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
        if (error)
                goto err_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        xfs_inode_clear_eofblocks_tag(ip);
        goto out_unlock;

err_cancel:
        /*
         * If we get an error at this point we simply don't
         * bother truncating the file.
         */
        xfs_trans_cancel(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

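/*
 * Preallocate on-disk space for the byte range [offset, offset + len) of the
 * file, allocating at most one extent per transaction iteration.
 */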
int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     nimaps;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (xfs_is_shutdown(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
        allocatesize_fsb = endoffset_fsb - startoffset_fsb;

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;
                unsigned int    dblocks, rblocks, resblks;

                /*
                 * Determine space reservations for data/realtime.
                 */
 822                if (unlikely(extsz)) {
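                        /*
                         * Round the range out to extent size hint
                         * granularity: s down to the nearest hint boundary,
                         * e up past any misalignment at the start and end.
                         */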
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        div_u64_rem(startoffset_fsb, extsz, &temp);
                        if (temp)
                                e += temp;
                        div_u64_rem(e, extsz, &temp);
                        if (temp)
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        rblocks = resblks;
                } else {
                        dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        rblocks = 0;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
                                dblocks, rblocks, false, &tp);
                if (error)
                        break;

                error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                                XFS_IEXT_ADD_NOSPLIT_CNT);
                if (error)
                        goto error;

                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, 0, imapp,
                                        &nimaps);
                if (error)
                        goto error;

                /*
                 * Complete the transaction
                 */
                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                allocated_fsb = imapp->br_blockcount;

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

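/*
 * Unmap at most two extents from the given range in a single transaction;
 * *done is set once the entire range has been unmapped.
 */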
static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
                        false, &tp);
        if (error)
                return error;

        error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                        XFS_IEXT_PUNCH_HOLE_CNT);
        if (error)
                goto out_trans_cancel;

        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
        if (error)
                goto out_trans_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

 935
 936/* Caller must first wait for the completion of any pending DIOs if required. */
 937int
 938xfs_flush_unmap_range(
 939        struct xfs_inode        *ip,
 940        xfs_off_t               offset,
 941        xfs_off_t               len)
 942{
 943        struct xfs_mount        *mp = ip->i_mount;
 944        struct inode            *inode = VFS_I(ip);
 945        xfs_off_t               rounding, start, end;
 946        int                     error;
 947
 948        rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
 949        start = round_down(offset, rounding);
 950        end = round_up(offset + len, rounding) - 1;
 951
 952        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 953        if (error)
 954                return error;
 955        truncate_pagecache_range(inode, start, end);
 956        return 0;
 957}
 958
int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /* We can only free complete realtime extents. */
        if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
                startoffset_fsb = roundup_64(startoffset_fsb,
                                             mp->m_sb.sb_rextsize);
                endoffset_fsb = rounddown_64(endoffset_fsb,
                                             mp->m_sb.sb_rextsize);
        }

        /*
         * Unmap the whole-block range; any partial blocks at the edges are
         * zeroed below.
         */
        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  iomap_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
        if (offset >= XFS_ISIZE(ip))
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
        error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
                        &xfs_buffered_write_iomap_ops);
        if (error)
                return error;

        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                round_down(offset + len, PAGE_SIZE), LLONG_MAX);
        }

        return error;
}

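/*
 * Prepare an inode for an extent shift: free post-EOF preallocations, flush
 * and invalidate the page cache, and clean out the COW fork over the
 * affected range.
 */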
static int
xfs_prepare_shift(
        struct xfs_inode        *ip,
        loff_t                  offset)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(ip);
                if (error)
                        return error;
        }

        /*
         * Shift operations must stabilize the start block offset boundary along
         * with the full range of the operation. If we don't, a COW writeback
         * completion could race with an insert, front merge with the start
         * extent (after split) during the shift and corrupt the file. Start
         * with the block just prior to the start to stabilize the boundary.
         */
        offset = round_down(offset, mp->m_sb.sb_blocksize);
        if (offset)
                offset -= mp->m_sb.sb_blocksize;

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
        if (error)
                return error;

        /*
         * Clean out anything hanging around in the cow fork now that
         * we've flushed all the dirty data out to disk to avoid having
         * CoW extents at the wrong offsets.
         */
        if (xfs_inode_has_cow_data(ip)) {
                error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
                                true);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * xfs_collapse_file_space()
 *      This routine frees disk space and shifts extents for the given file.
 *      The first thing we do is free data blocks in the specified range by
 *      calling xfs_free_file_space(). That also syncs dirty data and
 *      invalidates the page cache over the region on which the collapse
 *      range is working. Extent records are then shifted to the left to
 *      cover the hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_collapse_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           next_fsb = XFS_B_TO_FSB(mp, offset + len);
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_collapse_file_space(ip);

        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        while (!done) {
                error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
                                &done);
                if (error)
                        goto out_trans_cancel;
                if (done)
                        break;

                /* finish any deferred frees and roll the transaction */
                error = xfs_defer_finish(&tp);
                if (error)
                        goto out_trans_cancel;
        }

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * xfs_insert_file_space()
 *      This routine creates hole space by shifting extents for the given file.
 *      The first thing we do is sync dirty data and invalidate the page cache
 *      over the region on which the insert range is working. We then split
 *      an extent in two at the given offset by calling xfs_bmap_split_extent,
 *      and shift all extent records lying between [offset, last allocated
 *      extent] to the right to make room for the hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_insert_file_space(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           stop_fsb = XFS_B_TO_FSB(mp, offset);
        xfs_fileoff_t           next_fsb = NULLFSBLOCK;
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_insert_file_space(ip);

        error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
                        XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                        XFS_IEXT_PUNCH_HOLE_CNT);
        if (error)
                goto out_trans_cancel;

        /*
         * The extent shifting code works on extent granularity. So, if
         * stop_fsb is not the starting block of an extent, we need to split
         * the extent at stop_fsb.
         */
        error = xfs_bmap_split_extent(tp, ip, stop_fsb);
        if (error)
                goto out_trans_cancel;

        do {
                error = xfs_defer_finish(&tp);
                if (error)
                        goto out_trans_cancel;

                error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb);
                if (error)
                        goto out_trans_cancel;
        } while (!done);

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
        struct xfs_inode        *ip,    /* target inode */
        struct xfs_inode        *tip)   /* tmp inode */
{
        struct xfs_ifork        *ifp = &ip->i_df;
        struct xfs_ifork        *tifp = &tip->i_df;

        /* User/group/project quota ids must match if quotas are enforced. */
        if (XFS_IS_QUOTA_ON(ip->i_mount) &&
            (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
             !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
             ip->i_projid != tip->i_projid))
                return -EINVAL;

        /* Should never get a local format */
        if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
            tifp->if_format == XFS_DINODE_FMT_LOCAL)
                return -EINVAL;

        /*
         * If the target inode has fewer extents than the temporary inode,
         * why did userspace call us?
         */
        if (ifp->if_nextents < tifp->if_nextents)
                return -EINVAL;

        /*
         * If we have to use the (expensive) rmap swap method, we can
         * handle any number of extents and any format.
         */
        if (xfs_has_rmapbt(ip->i_mount))
                return 0;

        /*
         * If the target inode is in extent form and the temp inode is in btree
         * form then we will end up with the target inode in the wrong format
         * as we already know there are fewer extents in the temp inode.
         */
        if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            tifp->if_format == XFS_DINODE_FMT_BTREE)
                return -EINVAL;

        /* Check temp in extent form to max in target */
        if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return -EINVAL;

        /* Check target in extent form to max in temp */
        if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return -EINVAL;

        /*
         * If we are in a btree format, check that the temp root block will fit
         * in the target and that it has enough extents to be in btree format
         * in the target.
         *
         * Note that we have to be careful to allow btree->extent conversions
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
        if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_Q(ip) &&
                    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
                        return -EINVAL;
                if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        /* Reciprocal target->temp btree format checks */
        if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_Q(tip) &&
                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
                        return -EINVAL;
                if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        return 0;
}

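/*
 * Flush all dirty data and invalidate the entire page cache so the in-core
 * state matches the on-disk layout before we swap extents.
 */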
static int
xfs_swap_extent_flush(
        struct xfs_inode        *ip)
{
        int     error;

        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
        if (error)
                return error;
        truncate_pagecache_range(VFS_I(ip), 0, -1);

        /* Verify O_DIRECT for ftmp */
        if (VFS_I(ip)->i_mapping->nrpages)
                return -EINVAL;
        return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
        struct xfs_trans                **tpp,
        struct xfs_inode                *ip,
        struct xfs_inode                *tip)
{
        struct xfs_trans                *tp = *tpp;
        struct xfs_bmbt_irec            irec;
        struct xfs_bmbt_irec            uirec;
        struct xfs_bmbt_irec            tirec;
        xfs_fileoff_t                   offset_fsb;
        xfs_fileoff_t                   end_fsb;
        xfs_filblks_t                   count_fsb;
        int                             error;
        xfs_filblks_t                   ilen;
        xfs_filblks_t                   rlen;
        int                             nimaps;
        uint64_t                        tip_flags2;

        /*
         * If the source file has shared blocks, we must flag the donor
         * file as having shared blocks so that we get the shared-block
         * rmap functions when we go to fix up the rmaps.  The flags
         * will be switched for real later.
         */
1381        tip_flags2 = tip->i_diflags2;
1382        if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1383                tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1384
1385        offset_fsb = 0;
1386        end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1387        count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1388
1389        while (count_fsb) {
1390                /* Read extent from the donor file */
1391                nimaps = 1;
1392                error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1393                                &nimaps, 0);
1394                if (error)
1395                        goto out;
1396                ASSERT(nimaps == 1);
1397                ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1398
1399                trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1400                ilen = tirec.br_blockcount;
1401
1402                /* Unmap the old blocks in the source file. */
1403                while (tirec.br_blockcount) {
1404                        ASSERT(tp->t_firstblock == NULLFSBLOCK);
1405                        trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1406
1407                        /* Read extent from the source file */
1408                        nimaps = 1;
1409                        error = xfs_bmapi_read(ip, tirec.br_startoff,
1410                                        tirec.br_blockcount, &irec,
1411                                        &nimaps, 0);
1412                        if (error)
1413                                goto out;
1414                        ASSERT(nimaps == 1);
1415                        ASSERT(tirec.br_startoff == irec.br_startoff);
1416                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1417
1418                        /* Trim the extent. */
1419                        uirec = tirec;
1420                        uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1421                                        tirec.br_blockcount,
1422                                        irec.br_blockcount);
1423                        trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1424
1425                        if (xfs_bmap_is_real_extent(&uirec)) {
1426                                error = xfs_iext_count_may_overflow(ip,
1427                                                XFS_DATA_FORK,
1428                                                XFS_IEXT_SWAP_RMAP_CNT);
1429                                if (error)
1430                                        goto out;
1431                        }
1432
1433                        if (xfs_bmap_is_real_extent(&irec)) {
1434                                error = xfs_iext_count_may_overflow(tip,
1435                                                XFS_DATA_FORK,
1436                                                XFS_IEXT_SWAP_RMAP_CNT);
1437                                if (error)
1438                                        goto out;
1439                        }
1440
1441                        /* Remove the mapping from the donor file. */
1442                        xfs_bmap_unmap_extent(tp, tip, &uirec);
1443
1444                        /* Remove the mapping from the source file. */
1445                        xfs_bmap_unmap_extent(tp, ip, &irec);
1446
1447                        /* Map the donor file's blocks into the source file. */
1448                        xfs_bmap_map_extent(tp, ip, &uirec);
1449
1450                        /* Map the source file's blocks into the donor file. */
1451                        xfs_bmap_map_extent(tp, tip, &irec);
1452
1453                        error = xfs_defer_finish(tpp);
1454                        tp = *tpp;
1455                        if (error)
1456                                goto out;
1457
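                        /*
                         * Advance the donor mapping past the piece we just
                         * remapped.  Holes and delalloc extents carry sentinel
                         * startblock values, so only adjust real start blocks.
                         */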
1458                        tirec.br_startoff += rlen;
1459                        if (tirec.br_startblock != HOLESTARTBLOCK &&
1460                            tirec.br_startblock != DELAYSTARTBLOCK)
1461                                tirec.br_startblock += rlen;
1462                        tirec.br_blockcount -= rlen;
1463                }
1464
1465                /* Roll on... */
1466                count_fsb -= ilen;
1467                offset_fsb += ilen;
1468        }
1469
1470        tip->i_diflags2 = tip_flags2;
1471        return 0;
1472
1473out:
1474        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1475        tip->i_diflags2 = tip_flags2;
1476        return error;
1477}
1478
1479/* Swap the extents of two files by swapping data forks. */
1480STATIC int
1481xfs_swap_extent_forks(
1482        struct xfs_trans        *tp,
1483        struct xfs_inode        *ip,
1484        struct xfs_inode        *tip,
1485        int                     *src_log_flags,
1486        int                     *target_log_flags)
1487{
1488        xfs_filblks_t           aforkblks = 0;
1489        xfs_filblks_t           taforkblks = 0;
1490        xfs_extnum_t            junk;
1491        uint64_t                tmp;
1492        int                     error;
1493
1494        /*
1495         * Count the number of extended attribute blocks
1496         */
1497        if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
1498            ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1499                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1500                                &aforkblks);
1501                if (error)
1502                        return error;
1503        }
1504        if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
1505            tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1506                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1507                                &taforkblks);
1508                if (error)
1509                        return error;
1510        }
1511
1512        /*
1513         * Btree format (v3) inodes have the inode number stamped in the bmbt
1514         * block headers. We can't start changing the bmbt blocks until the
1515         * inode owner change is logged so recovery does the right thing in the
1516         * event of a crash. Set the owner change log flags now and leave the
1517         * bmbt scan as the last step.
1518         */
1519        if (xfs_has_v3inodes(ip->i_mount)) {
1520                if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1521                        (*target_log_flags) |= XFS_ILOG_DOWNER;
1522                if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1523                        (*src_log_flags) |= XFS_ILOG_DOWNER;
1524        }
1525
1526        /*
1527         * Swap the data forks of the inodes
1528         */
1529        swap(ip->i_df, tip->i_df);
1530
1531        /*
1532         * Fix the on-disk inode block counts; attr fork blocks stay with each inode
1533         */
1534        tmp = (uint64_t)ip->i_nblocks;
1535        ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1536        tip->i_nblocks = tmp + taforkblks - aforkblks;
1537
1538        /*
1539         * The extents in the source inode could still contain speculative
1540         * preallocation beyond EOF (e.g. the file is open but not modified
1541         * while defrag is in progress). In that case, we need to copy over the
1542         * number of delalloc blocks the data fork in the source inode is
1543         * tracking beyond EOF so that when the fork is truncated away when the
1544         * temporary inode is unlinked we don't underrun the i_delayed_blks
1545         * counter on that inode.
1546         */
1547        ASSERT(tip->i_delayed_blks == 0);
1548        tip->i_delayed_blks = ip->i_delayed_blks;
1549        ip->i_delayed_blks = 0;
1550
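        /*
         * Arrange to log the data fork representation each inode now
         * carries: the in-core extent list for extent-format forks, or
         * the btree root for btree-format forks.
         */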
1551        switch (ip->i_df.if_format) {
1552        case XFS_DINODE_FMT_EXTENTS:
1553                (*src_log_flags) |= XFS_ILOG_DEXT;
1554                break;
1555        case XFS_DINODE_FMT_BTREE:
1556                ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1557                       (*src_log_flags & XFS_ILOG_DOWNER));
1558                (*src_log_flags) |= XFS_ILOG_DBROOT;
1559                break;
1560        }
1561
1562        switch (tip->i_df.if_format) {
1563        case XFS_DINODE_FMT_EXTENTS:
1564                (*target_log_flags) |= XFS_ILOG_DEXT;
1565                break;
1566        case XFS_DINODE_FMT_BTREE:
1567                (*target_log_flags) |= XFS_ILOG_DBROOT;
1568                ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1569                       (*target_log_flags & XFS_ILOG_DOWNER));
1570                break;
1571        }
1572
1573        return 0;
1574}
1575
1576/*
1577 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1578 * change owner scan attempts to order all modified buffers in the current
1579 * transaction. In the event of ordered buffer failure, the offending buffer is
1580 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1581 * the transaction in this case to replenish the fallback log reservation and
1582 * restart the scan. This process repeats until the scan completes.
1583 */
1584static int
1585xfs_swap_change_owner(
1586        struct xfs_trans        **tpp,
1587        struct xfs_inode        *ip,
1588        struct xfs_inode        *tmpip)
1589{
1590        int                     error;
1591        struct xfs_trans        *tp = *tpp;
1592
1593        do {
1594                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1595                                              NULL);
1596                /* success or fatal error */
1597                if (error != -EAGAIN)
1598                        break;
1599
1600                error = xfs_trans_roll(tpp);
1601                if (error)
1602                        break;
1603                tp = *tpp;
1604
1605                /*
1606                 * Redirty both inodes so they can relog and keep the log tail
1607                 * moving forward.
1608                 */
1609                xfs_trans_ijoin(tp, ip, 0);
1610                xfs_trans_ijoin(tp, tmpip, 0);
1611                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1612                xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1613        } while (true);
1614
1615        return error;
1616}
1617
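/*
 * This function backs the XFS_IOC_SWAPEXT ioctl, which xfs_fsr uses to
 * defragment files.  A rough sketch of the calling convention, with
 * placeholder names (see struct xfs_swapext in xfs_fs.h):
 *
 *	struct xfs_swapext sx = {
 *		.sx_version  = XFS_SX_VERSION,
 *		.sx_fdtarget = target_fd,	  file being defragmented
 *		.sx_fdtmp    = donor_fd,	  freshly laid out donor file
 *		.sx_offset   = 0,		  whole-file swaps only
 *		.sx_length   = target_size,	  must match both disk sizes
 *		.sx_stat     = target_bulkstat,	  for the -EBUSY race check
 *	};
 *	ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
 *
 * The bulkstat timestamps in sx_stat are compared against the target
 * inode below so that a racing modification aborts the swap.
 */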
1618int
1619xfs_swap_extents(
1620        struct xfs_inode        *ip,    /* target inode */
1621        struct xfs_inode        *tip,   /* tmp inode */
1622        struct xfs_swapext      *sxp)
1623{
1624        struct xfs_mount        *mp = ip->i_mount;
1625        struct xfs_trans        *tp;
1626        struct xfs_bstat        *sbp = &sxp->sx_stat;
1627        int                     src_log_flags, target_log_flags;
1628        int                     error = 0;
1629        uint64_t                f;
1630        int                     resblks = 0;
1631        unsigned int            flags = 0;
1632
1633        /*
1634         * Lock the inodes against other IO, page faults and truncate to
1635         * begin with.  Then we can safely flush the inodes and ensure they
1636         * have no page cache. Once we have done this we can take the ilocks and
1637         * do the rest of the checks.
1638         */
1639        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1640        filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1641                                    VFS_I(tip)->i_mapping);
1642
1643        /* Verify that both files have the same format */
1644        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1645                error = -EINVAL;
1646                goto out_unlock;
1647        }
1648
1649        /* Verify both files are either real-time or non-realtime */
1650        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1651                error = -EINVAL;
1652                goto out_unlock;
1653        }
1654
1655        error = xfs_qm_dqattach(ip);
1656        if (error)
1657                goto out_unlock;
1658
1659        error = xfs_qm_dqattach(tip);
1660        if (error)
1661                goto out_unlock;
1662
1663        error = xfs_swap_extent_flush(ip);
1664        if (error)
1665                goto out_unlock;
1666        error = xfs_swap_extent_flush(tip);
1667        if (error)
1668                goto out_unlock;
1669
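        /*
         * Drop any speculative CoW staging extents on the donor file now
         * that it has been flushed; they are no longer needed.
         */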
1670        if (xfs_inode_has_cow_data(tip)) {
1671                error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1672                if (error)
1673                        goto out_unlock;
1674        }
1675
1676        /*
1677         * Extent "swapping" with rmap requires a permanent reservation and
1678         * a block reservation because it's really just a remap operation
1679         * performed with log redo items!
1680         */
1681        if (xfs_has_rmapbt(mp)) {
1682                int             w = XFS_DATA_FORK;
1683                uint32_t        ipnext = ip->i_df.if_nextents;
1684                uint32_t        tipnext = tip->i_df.if_nextents;
1685
1686                /*
1687                 * Conceptually this shouldn't affect the shape of either bmbt,
1688                 * but since we atomically move extents one by one, we reserve
1689                 * enough space to rebuild both trees.
1690                 */
1691                resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1692        resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1693
1694                /*
1695                 * If either inode straddles a bmapbt block allocation boundary,
1696                 * the rmapbt algorithm triggers repeated allocs and frees as
1697                 * extents are remapped. This can exhaust the block reservation
1698                 * prematurely and cause shutdown. Return freed blocks to the
1699                 * transaction reservation to counter this behavior.
1700                 */
1701                flags |= XFS_TRANS_RES_FDBLKS;
1702        }
1703        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1704                                &tp);
1705        if (error)
1706                goto out_unlock;
1707
1708        /*
1709         * Lock and join the inodes to the transaction so that transaction commit
1710         * or cancel will unlock the inodes from this point onwards.
1711         */
1712        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1713        xfs_trans_ijoin(tp, ip, 0);
1714        xfs_trans_ijoin(tp, tip, 0);
1715
1717        /* Verify all data are being swapped */
1718        if (sxp->sx_offset != 0 ||
1719            sxp->sx_length != ip->i_disk_size ||
1720            sxp->sx_length != tip->i_disk_size) {
1721                error = -EFAULT;
1722                goto out_trans_cancel;
1723        }
1724
1725        trace_xfs_swap_extent_before(ip, 0);
1726        trace_xfs_swap_extent_before(tip, 1);
1727
1728        /* check inode formats now that data is flushed */
1729        error = xfs_swap_extents_check_format(ip, tip);
1730        if (error) {
1731                xfs_notice(mp,
1732                    "%s: inode 0x%llx format is incompatible for exchanging.",
1733                                __func__, ip->i_ino);
1734                goto out_trans_cancel;
1735        }
1736
1737        /*
1738         * Compare the current change & modify times with those
1739         * passed in.  If they differ, we abort this swap.
1740         * This is the mechanism used to assure the calling
1741         * process that the file was not changed out from
1742         * under it.
1743         */
1744        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1745            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1746            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1747            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1748                error = -EBUSY;
1749                goto out_trans_cancel;
1750        }
1751
1752        /*
1753         * Note the trickiness in setting the log flags - we set the owner log
1754         * flag on the opposite inode (i.e. the inode we are setting the new
1755         * owner to be) because once we swap the forks and log that, log
1756         * recovery is going to see the fork as owned by the swapped inode,
1757         * not the pre-swapped inodes.
1758         */
1759        src_log_flags = XFS_ILOG_CORE;
1760        target_log_flags = XFS_ILOG_CORE;
1761
1762        if (xfs_has_rmapbt(mp))
1763                error = xfs_swap_extent_rmap(&tp, ip, tip);
1764        else
1765                error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1766                                &target_log_flags);
1767        if (error)
1768                goto out_trans_cancel;
1769
1770        /* Do we have to swap reflink flags? */
1771        if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1772            (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1773                f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1774                ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1775                ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1776                tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1777                tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1778        }
1779
1780        /* Swap the cow forks and move the cowblocks tags with them. */
1781        if (xfs_has_reflink(mp)) {
1782                ASSERT(!ip->i_cowfp ||
1783                       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1784                ASSERT(!tip->i_cowfp ||
1785                       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1786
1787                swap(ip->i_cowfp, tip->i_cowfp);
1788
1789                if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1790                        xfs_inode_set_cowblocks_tag(ip);
1791                else
1792                        xfs_inode_clear_cowblocks_tag(ip);
1793                if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1794                        xfs_inode_set_cowblocks_tag(tip);
1795                else
1796                        xfs_inode_clear_cowblocks_tag(tip);
1797        }
1798
1799        xfs_trans_log_inode(tp, ip,  src_log_flags);
1800        xfs_trans_log_inode(tp, tip, target_log_flags);
1801
1802        /*
1803         * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1804         * have inode number owner values in the bmbt blocks that still refer to
1805         * the old inode. Scan each bmbt to fix up the owner values with the
1806         * inode number of the current inode.
1807         */
1808        if (src_log_flags & XFS_ILOG_DOWNER) {
1809                error = xfs_swap_change_owner(&tp, ip, tip);
1810                if (error)
1811                        goto out_trans_cancel;
1812        }
1813        if (target_log_flags & XFS_ILOG_DOWNER) {
1814                error = xfs_swap_change_owner(&tp, tip, ip);
1815                if (error)
1816                        goto out_trans_cancel;
1817        }
1818
1819        /*
1820         * If this is a synchronous mount, make sure that the
1821         * transaction goes to disk before returning to the user.
1822         */
1823        if (xfs_has_wsync(mp))
1824                xfs_trans_set_sync(tp);
1825
1826        error = xfs_trans_commit(tp);
1827
1828        trace_xfs_swap_extent_after(ip, 0);
1829        trace_xfs_swap_extent_after(tip, 1);
1830
1831out_unlock_ilock:
1832        xfs_iunlock(ip, XFS_ILOCK_EXCL);
1833        xfs_iunlock(tip, XFS_ILOCK_EXCL);
1834out_unlock:
1835        filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1836                                      VFS_I(tip)->i_mapping);
1837        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1838        return error;
1839
1840out_trans_cancel:
1841        xfs_trans_cancel(tp);
1842        goto out_unlock_ilock;
1843}
1844