linux/fs/xfs/xfs_bmap_util.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
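        /*
         * Realtime block numbers are linear offsets into the realtime
         * device, so converting filesystem blocks to basic blocks is
         * sufficient.  Data device block numbers encode an AG number and an
         * AG-relative offset, so they need the full XFS_FSB_TO_DADDR
         * translation.
         */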
        return (XFS_IS_REALTIME_INODE(ip) ?
                 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
                 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode *ip,
        xfs_fsblock_t   start_fsb,
        xfs_off_t       count_fsb)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_daddr_t     sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t        block = XFS_BB_TO_FSBT(mp, sector);

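        /*
         * blkdev_issue_zeroout() takes 512-byte sectors, hence the shifts by
         * (s_blocksize_bits - 9) below.  For example, with 4096-byte
         * filesystem blocks the shift is 12 - 9 = 3, so fs block 10 becomes
         * sector 80 and a count of 4 fs blocks becomes 32 sectors.
         */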
        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, true);
}

int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        xfs_alloctype_t atype = 0;      /* type for allocation routines */
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        if (do_mod(ap->offset, align) || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, atype, ap->wasdel, prod, &rtb)))
                return error;
        if (rtb == NULLFSBLOCK && prod > 1 &&
            (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
                                           ap->length, &ralen, atype,
                                           ap->wasdel, 1, &rtb)))
                return error;
        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

                /* Zero the extent if we were asked to do so */
                if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
                        if (error)
                                return error;
                }
        } else {
                ap->length = 0;
        }
        return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        int                     numrecs,
        int                     *count)
{
        int             b;

        for (b = 0; b < numrecs; b++) {
                xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
                *count += xfs_bmbt_get_blockcount(frp);
        }
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        int                     *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int                                     /* error */
xfs_bmap_count_tree(
        xfs_mount_t     *mp,            /* file system mount point */
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ifork_t     *ifp,           /* inode fork pointer */
        xfs_fsblock_t   blockno,        /* file system block number */
        int             levelin,        /* level in btree */
        int             *count)         /* Count of blocks */
{
        int                     error;
        xfs_buf_t               *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                if (unlikely((error =
                     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}

/*
 * Count fsblocks of the given fork.
 */
static int                                      /* error */
xfs_bmap_count_blocks(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode */
        int                     whichfork,      /* data or attr fork */
        int                     *count)         /* out: count of blocks */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
                xfs_bmap_count_leaves(ifp, 0,
                        ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
                        count);
                return 0;
        }

        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        block = ifp->if_broot;
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
                XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
                                 mp);
                return -EFSCORRUPTED;
        }

        return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
        xfs_inode_t             *ip,            /* xfs incore inode pointer */
        struct getbmapx         *out,           /* output structure */
        int                     prealloced,     /* this is a file with
                                                 * preallocated data space */
        __int64_t               end,            /* last block requested */
        xfs_fsblock_t           startblock)
{
        __int64_t               fixlen;
        xfs_mount_t             *mp;            /* file system mount point */
        xfs_ifork_t             *ifp;           /* inode fork pointer */
        xfs_extnum_t            lastx;          /* last extent pointer */
        xfs_fileoff_t           fileblock;

        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
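                /*
                 * fixlen is now the distance from bmv_offset to the block
                 * aligned end of file, in 512-byte basic blocks.
                 */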
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF. Trim it. */
                        if (fixlen <= 0)
                                return 0;
                        out->bmv_length = fixlen;
                }
        } else {
                if (startblock == DELAYSTARTBLOCK)
                        out->bmv_block = -2;
                else
                        out->bmv_block = xfs_fsb_to_db(ip, startblock);
                fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
                ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
                if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
                   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
                        out->bmv_oflags |= BMV_OF_LAST;
        }

        return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int                                             /* error code */
xfs_getbmap(
        xfs_inode_t             *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        xfs_bmap_format_t       formatter,      /* format to user */
        void                    *arg)           /* formatter arg */
{
        __int64_t               bmvend;         /* last block requested */
        int                     error = 0;      /* return value */
        __int64_t               fixlen;         /* length for -1 case */
        int                     i;              /* extent number */
        int                     lock;           /* lock state */
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
        int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
        int                     whichfork;      /* data or attr fork */
        int                     prealloced;     /* this is a file with
                                                 * preallocated data space */
        int                     iflags;         /* interface flags */
        int                     bmapi_flags;    /* flags for xfs_bmapi */
        int                     cur_ext = 0;

        mp = ip->i_mount;
        iflags = bmv->bmv_iflags;
        whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

        if (whichfork == XFS_ATTR_FORK) {
                if (XFS_IFORK_Q(ip)) {
                        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
                                return -EINVAL;
                } else if (unlikely(
                           ip->i_d.di_aformat != 0 &&
                           ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
                        XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return -EFSCORRUPTED;
                }

                prealloced = 0;
                fixlen = 1LL << 32;
        } else {
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
                        return -EINVAL;

                if (xfs_get_extsz_hint(ip) ||
                    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
                        prealloced = 1;
                        fixlen = mp->m_super->s_maxbytes;
                } else {
                        prealloced = 0;
                        fixlen = XFS_ISIZE(ip);
                }
        }

        if (bmv->bmv_length == -1) {
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
                bmv->bmv_length =
                        max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
        } else if (bmv->bmv_length == 0) {
                bmv->bmv_entries = 0;
                return 0;
        } else if (bmv->bmv_length < 0) {
                return -EINVAL;
        }

        nex = bmv->bmv_count - 1;
        if (nex <= 0)
                return -EINVAL;
        bmvend = bmv->bmv_offset + bmv->bmv_length;

        if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
                return -ENOMEM;
        out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
        if (!out)
                return -ENOMEM;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK) {
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                lock = xfs_ilock_data_map_shared(ip);
        } else {
                lock = xfs_ilock_attr_map_shared(ip);
        }

        /*
         * Don't let nex be bigger than the number of extents
         * we can have assuming alternating holes and real extents.
         */
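        /*
         * e.g. a fork with 3 extents maps to at most 2 * 3 + 1 = 7 records:
         * hole, extent, hole, extent, hole, extent, hole.
         */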
        if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
                nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

        bmapi_flags = xfs_bmapi_aflag(whichfork);
        if (!(iflags & BMV_IF_PREALLOC))
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        /*
         * Allocate enough space to handle "subnex" maps at a time.
         */
        error = -ENOMEM;
        subnex = 16;
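        /*
         * subnex is just a batch size (presumably chosen as a reasonable
         * trade-off): it bounds the map[] allocation below while the loop
         * amortises repeated xfs_bmapi_read() calls across many extents.
         */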
        map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
        if (!map)
                goto out_unlock_ilock;

        bmv->bmv_entries = 0;

        if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
            (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
                error = 0;
                goto out_free_map;
        }

        nexleft = nex;

        do {
                nmap = (nexleft > subnex) ? subnex : nexleft;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
                if (error)
                        goto out_free_map;
                ASSERT(nmap <= subnex);

                for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
                        else if (map[i].br_startblock == DELAYSTARTBLOCK)
                                out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
                        out[cur_ext].bmv_offset =
                                XFS_FSB_TO_BB(mp, map[i].br_startoff);
                        out[cur_ext].bmv_length =
                                XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        out[cur_ext].bmv_unused1 = 0;
                        out[cur_ext].bmv_unused2 = 0;

                        /*
                         * delayed allocation extents that start beyond EOF can
                         * occur due to speculative EOF allocation when the
                         * delalloc extent is larger than the largest freespace
                         * extent at conversion time. These extents cannot be
                         * converted by data writeback, so can exist here even
                         * if we are not supposed to be finding delalloc
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
                            map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);

                        if (map[i].br_startblock == HOLESTARTBLOCK &&
                            whichfork == XFS_ATTR_FORK) {
                                /* came to the end of attribute fork */
                                out[cur_ext].bmv_oflags |= BMV_OF_LAST;
                                goto out_free_map;
                        }

                        if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
                                        prealloced, bmvend,
                                        map[i].br_startblock))
                                goto out_free_map;

                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

                        /*
                         * In case we don't want to return the hole,
                         * don't increase cur_ext so that we can reuse
                         * it in the next loop.
                         */
                        if ((iflags & BMV_IF_NO_HOLES) &&
                            map[i].br_startblock == HOLESTARTBLOCK) {
                                memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
                                continue;
                        }

                        nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
        } while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
        kmem_free(map);
 out_unlock_ilock:
        xfs_iunlock(ip, lock);
 out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        for (i = 0; i < cur_ext; i++) {
                int full = 0;   /* user array is full */

                /* format results & advance arg */
                error = formatter(&arg, &out[i], &full);
                if (error || full)
                        break;
        }

        kmem_free(out);
        return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        xfs_fileoff_t           remaining = length;
        int                     error = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        do {
                int             done;
                xfs_bmbt_irec_t imap;
                int             nimaps = 1;
                xfs_fsblock_t   firstblock;
                struct xfs_defer_ops dfops;

                /*
                 * Map the range first and check that it is a delalloc extent
                 * before trying to unmap the range. Otherwise we will be
                 * trying to remove a real extent (which requires a
                 * transaction) or a hole, which is probably a bad idea...
                 */
                error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
                                       XFS_BMAPI_ENTIRE);

                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "Failed delalloc mapping lookup ino %lld fsb %lld.",
                                                ip->i_ino, start_fsb);
                        }
                        break;
                }
                if (!nimaps) {
                        /* nothing there */
                        goto next_block;
                }
                if (imap.br_startblock != DELAYSTARTBLOCK) {
                        /* been converted, ignore */
                        goto next_block;
                }
                WARN_ON(imap.br_blockcount == 0);

                /*
                 * Note: while we initialise the firstblock/dfops pair, they
                 * should never be used because blocks should never be
                 * allocated or freed for a delalloc extent and hence we
                 * don't need to cancel or finish them after the xfs_bunmapi()
                 * call.
                 */
                xfs_defer_init(&dfops, &firstblock);
                error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
                                        &dfops, &done);
                if (error)
                        break;

                ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
                start_fsb++;
                remaining--;
        } while (remaining > 0);

        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        bool            need_iolock)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_fileoff_t   end_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip, 0);
                if (error)
                        return error;

                /*
                 * There are blocks after the end of file.
                 * Free them up now by truncating the file to
                 * its current size.
                 */
                if (need_iolock) {
                        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
                                return -EAGAIN;
                }

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        if (need_iolock)
                                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size.  If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (ie NULL files bug).
                 */
                error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
                                              XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp);
                } else {
                        error = xfs_trans_commit(tp);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (need_iolock)
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fsblock_t           firstfsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        struct xfs_defer_ops    dfops;
        uint                    qblocks, resblks, resrtextents;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        if ((temp = do_mod(startoffset_fsb, extsz)))
                                e += temp;
                        if ((temp = do_mod(e, extsz)))
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }
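                /*
                 * [s, e) is now the extsz-aligned span the reservation must
                 * cover.  For illustration, with extsz = 4, a request at
                 * startoffset_fsb = 6 for allocatesize_fsb = 3 gives s = 4
                 * and e = 6 + 3 + 2 = 11, rounded up to 12, so we reserve
                 * for 8 blocks rather than the 3 requested.
                 */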

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
                                resrtextents, 0, &tp);

                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                xfs_defer_init(&dfops, &firstfsb);
                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, &firstfsb,
                                        resblks, imapp, &nimaps, &dfops);
                if (error)
                        goto error0;

                /*
                 * Complete the transaction
                 */
                error = xfs_defer_finish(&tp, &dfops, NULL);
                if (error)
                        goto error0;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
        xfs_defer_cancel(&dfops);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           firstfsb;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        if (error) {
                ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
                        ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        xfs_defer_init(&dfops, &firstfsb);
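        /* The "2" below caps xfs_bunmapi() at two extents per transaction. */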
        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
                        &dfops, done);
        if (error)
                goto out_bmap_cancel;

        error = xfs_defer_finish(&tp, &dfops, ip);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *startoffset_fsb,
        xfs_fileoff_t           *endoffset_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        int                     nimap, error;
        xfs_extlen_t            mod = 0;

        nimap = 1;
        error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                xfs_daddr_t     block;

                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                block = imap.br_startblock;
                mod = do_div(block, mp->m_sb.sb_rextsize);
                if (mod)
                        *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
        }

        nimap = 1;
        error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                mod++;
                if (mod && mod != mp->m_sb.sb_rextsize)
                        *endoffset_fsb -= mod;
        }

        return 0;
}

static int
xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        xfs_off_t               rounding, start, end;
        int                     error;

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(inode);

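        /*
         * Round the range out to the larger of the filesystem block size and
         * the page size so that the blocks and pages straddling each end of
         * the range are fully written back and invalidated; e.g. with
         * 4096-byte blocks and 4096-byte pages, rounding is 4096.
         */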
        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        start = round_down(offset, rounding);
        end = round_up(offset + len, rounding) - 1;

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;
        truncate_pagecache_range(inode, start, end);
        return 0;
}

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        error = xfs_flush_unmap_range(ip, offset, len);
        if (error)
                return error;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /*
         * Need to zero the stuff we're not freeing, on disk.  If it's a RT
         * file and we can't use unwritten extents then we actually need to
         * zero the whole extent, otherwise we just need to take care of block
         * boundaries, and xfs_bunmapi will handle the rest.
         */
        if (XFS_IS_REALTIME_INODE(ip) &&
            !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
                error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
                                &endoffset_fsb);
                if (error)
                        return error;
        }

        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  xfs_zero_range is
         * smart enough to skip any holes, including those we just created.
         */
        return xfs_zero_range(ip, offset, len, NULL);
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    blksize;
        int                     error;

        trace_xfs_zero_file_space(ip);

        blksize = 1 << mp->m_sb.sb_blocklog;

        /*
         * Punch a hole and prealloc the range. We use hole punch rather than
         * unwritten extent conversion for two reasons:
         *
         * 1.) Hole punch handles partial block zeroing for us.
         *
         * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
         * by virtue of the hole punch.
         */
        error = xfs_free_file_space(ip, offset, len);
        if (error)
                goto out;

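        /*
         * The preallocation below is widened to whole filesystem blocks.
         * For example, with 4096-byte blocks, offset = 6000 and len = 3000
         * preallocate the aligned byte range [4096, 12288).
         */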
        error = xfs_alloc_file_space(ip, round_down(offset, blksize),
                                     round_up(offset + len, blksize) -
                                     round_down(offset, blksize),
                                     XFS_BMAPI_PREALLOC);
out:
        return error;
}

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with the block at (offset + len)
 * and shift each extent up to the last extent.
 * If we are shifting right, we will start with the last extent inside file
 * space and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        enum shift_direction    direction)
{
        int                     done = 0;
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        xfs_fileoff_t           stop_fsb;
        xfs_fileoff_t           next_fsb;
        xfs_fileoff_t           shift_fsb;

        ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

        if (direction == SHIFT_LEFT) {
                next_fsb = XFS_B_TO_FSB(mp, offset + len);
                stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
        } else {
                /*
                 * If right shift, delegate the work of initialization of
                 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
                 */
                next_fsb = NULLFSBLOCK;
                stop_fsb = XFS_B_TO_FSB(mp, offset);
        }

        shift_fsb = XFS_B_TO_FSB(mp, len);

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(mp, ip, false);
                if (error)
                        return error;
        }

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                             offset, -1);
        if (error)
                return error;
        error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
                                        offset >> PAGE_SHIFT, -1);
        if (error)
                return error;

        /*
         * The extent shifting code works on extent granularity. So, if
         * stop_fsb is not the starting block of an extent, we need to split
         * the extent at stop_fsb.
         */
        if (direction == SHIFT_RIGHT) {
                error = xfs_bmap_split_extent(ip, stop_fsb);
                if (error)
                        return error;
        }

        while (!error && !done) {
                /*
                 * We need to reserve a permanent block for the transaction:
                 * after shifting an extent into a hole we may find that
                 * adjacent extents can be merged, which can free a block
                 * during the record update.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
                                XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot,
                                XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_trans_cancel;

                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

                xfs_defer_init(&dfops, &first_block);

                /*
                 * We are using the write transaction, in which a maximum of
                 * 2 bmbt updates are allowed.
                 */
                error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb, &first_block, &dfops,
                                direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_defer_finish(&tp, &dfops, NULL);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}
1359
1360/*
1361 * xfs_collapse_file_space()
1362 *      This routine frees disk space and shifts extents for the given file.
1363 *      The first thing we do is free the data blocks in the specified range
1364 *      by calling xfs_free_file_space(), which also syncs dirty data and
1365 *      invalidates the page cache over the region on which the collapse
1366 *      range is working. We then shift extent records to the left to cover
1367 *      the hole.
1368 * RETURNS:
1369 *      0 on success
1370 *      errno on error
1371 */
1372int
1373xfs_collapse_file_space(
1374        struct xfs_inode        *ip,
1375        xfs_off_t               offset,
1376        xfs_off_t               len)
1377{
1378        int error;
1379
1380        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1381        trace_xfs_collapse_file_space(ip);
1382
1383        error = xfs_free_file_space(ip, offset, len);
1384        if (error)
1385                return error;
1386
1387        return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1388}
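/*
 * For illustration only: userspace reaches xfs_collapse_file_space() through
 * fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. A minimal sketch follows, using
 * a hypothetical helper; error handling is trimmed, and offset and len must
 * be multiples of the filesystem block size or the kernel returns -EINVAL.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int collapse_range(int fd, off_t offset, off_t len)
{
	/* Remove [offset, offset + len) and shift the tail of the file left. */
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
}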
1389
1390/*
1391 * xfs_insert_file_space()
1392 *      This routine creates a hole by shifting extents for the given file.
1393 *      The first thing we do is sync dirty data and invalidate the page
1394 *      cache over the region on which the insert range is working. We then
1395 *      split an extent into two at the given offset by calling
1396 *      xfs_bmap_split_extent, and shift all extent records lying between
1397 *      [offset, last allocated extent] to the right to make room for the hole.
1398 * RETURNS:
1399 *      0 on success
1400 *      errno on error
1401 */
1402int
1403xfs_insert_file_space(
1404        struct xfs_inode        *ip,
1405        loff_t                  offset,
1406        loff_t                  len)
1407{
1408        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1409        trace_xfs_insert_file_space(ip);
1410
1411        return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1412}
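/*
 * Likewise for illustration: xfs_insert_file_space() is driven by
 * fallocate(2) with FALLOC_FL_INSERT_RANGE. A hypothetical helper, with the
 * same includes and block-size alignment requirements as the collapse sketch
 * above:
 */
static int insert_range(int fd, off_t offset, off_t len)
{
	/* Make a hole: data at and beyond offset moves right by len. */
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
}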
1413
1414/*
1415 * We need to check that the format of the data fork in the temporary inode is
1416 * valid for the target inode before doing the swap. This is not a problem with
1417 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1418 * data fork depending on the space the attribute fork is taking so we can get
1419 * invalid formats on the target inode.
1420 *
1421 * E.g. target has space for 7 extents in extent format, temp inode only has
1422 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1423 * btree, but when swapped it needs to be in extent format. Hence we can't just
1424 * blindly swap data forks on attr2 filesystems.
1425 *
1426 * Note that we check the swap in both directions so that we don't end up with
1427 * a corrupt temporary inode, either.
1428 *
1429 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1430 * inode will prevent this situation from occurring, so all we do here is
1431 * reject and log the attempt. Basically we are putting the responsibility on
1432 * userspace to get this right.
1433 */
1434static int
1435xfs_swap_extents_check_format(
1436        xfs_inode_t     *ip,    /* target inode */
1437        xfs_inode_t     *tip)   /* tmp inode */
1438{
1439
1440        /* Should never get a local format */
1441        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1442            tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1443                return -EINVAL;
1444
1445        /*
1446         * If the target inode has fewer extents than the temporary inode,
1447         * why did userspace call us?
1448         */
1449        if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1450                return -EINVAL;
1451
1452        /*
1453         * If the target inode is in extent format and the temp inode is in
1454         * btree format, we will end up with the target inode in the wrong
1455         * format, as we already know there are fewer extents in the temp inode.
1456         */
1457        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1458            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1459                return -EINVAL;
1460
1461        /* Check temp in extent form to max in target */
1462        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1463            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1464                        XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1465                return -EINVAL;
1466
1467        /* Check target in extent form to max in temp */
1468        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1469            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1470                        XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1471                return -EINVAL;
1472
1473        /*
1474         * If we are in a btree format, check that the temp root block will fit
1475         * in the target and that it has enough extents to be in btree format
1476         * in the target.
1477         *
1478         * Note that we have to be careful to allow btree->extent conversions
1479         * (a common defrag case) which will occur when the temp inode is in
1480         * extent format...
1481         */
1482        if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1483                if (XFS_IFORK_BOFF(ip) &&
1484                    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1485                        return -EINVAL;
1486                if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1487                    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1488                        return -EINVAL;
1489        }
1490
1491        /* Reciprocal target->temp btree format checks */
1492        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1493                if (XFS_IFORK_BOFF(tip) &&
1494                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1495                        return -EINVAL;
1496                if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1497                    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1498                        return -EINVAL;
1499        }
1500
1501        return 0;
1502}
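/*
 * Worked example (hypothetical numbers, mirroring the 7-vs-6 case in the
 * comment above): say XFS_IFORK_MAXEXT() is 7 for the target and 6 for the
 * temp inode. Defragmenting down to 7 extents leaves the temp data fork in
 * btree format (7 > 6). The btree branch above then sees nextents(temp) = 7
 * <= maxext(target) = 7 and returns -EINVAL: after a blind swap the target
 * would hold a btree that should have been converted to extent format.
 */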
1503
1504static int
1505xfs_swap_extent_flush(
1506        struct xfs_inode        *ip)
1507{
1508        int     error;
1509
1510        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1511        if (error)
1512                return error;
1513        truncate_pagecache_range(VFS_I(ip), 0, -1);
1514
1515        /* Verify no page cache remains (ftmp is expected to be O_DIRECT) */
1516        if (VFS_I(ip)->i_mapping->nrpages)
1517                return -EINVAL;
1518        return 0;
1519}
1520
1521int
1522xfs_swap_extents(
1523        xfs_inode_t     *ip,    /* target inode */
1524        xfs_inode_t     *tip,   /* tmp inode */
1525        xfs_swapext_t   *sxp)
1526{
1527        xfs_mount_t     *mp = ip->i_mount;
1528        xfs_trans_t     *tp;
1529        xfs_bstat_t     *sbp = &sxp->sx_stat;
1530        xfs_ifork_t     *tempifp, *ifp, *tifp;
1531        int             src_log_flags, target_log_flags;
1532        int             error = 0;
1533        int             aforkblks = 0;
1534        int             taforkblks = 0;
1535        __uint64_t      tmp;
1536        int             lock_flags;
1537
1538        /* XXX: we can't do this with rmap, will fix later */
1539        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1540                return -EOPNOTSUPP;
1541
1542        tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1543        if (!tempifp) {
1544                error = -ENOMEM;
1545                goto out;
1546        }
1547
1548        /*
1549         * Lock the inodes against other IO, page faults and truncate to
1550         * begin with.  Then we can safely ensure the inodes are flushed and
1551         * have no page cache. Once we have done this we can take the ilocks and
1552         * do the rest of the checks.
1553         */
1554        lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1555        xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1556        xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1557
1558        /* Verify that both files have the same format */
1559        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1560                error = -EINVAL;
1561                goto out_unlock;
1562        }
1563
1564        /* Verify both files are either real-time or non-realtime */
1565        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1566                error = -EINVAL;
1567                goto out_unlock;
1568        }
1569
1570        error = xfs_swap_extent_flush(ip);
1571        if (error)
1572                goto out_unlock;
1573        error = xfs_swap_extent_flush(tip);
1574        if (error)
1575                goto out_unlock;
1576
1577        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
1578        if (error)
1579                goto out_unlock;
1580
1581        /*
1582         * Lock and join the inodes to the transaction so that transaction commit
1583         * or cancel will unlock the inodes from this point onwards.
1584         */
1585        xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1586        lock_flags |= XFS_ILOCK_EXCL;
1587        xfs_trans_ijoin(tp, ip, lock_flags);
1588        xfs_trans_ijoin(tp, tip, lock_flags);
1589
1590
1591        /* Verify all data are being swapped */
1592        if (sxp->sx_offset != 0 ||
1593            sxp->sx_length != ip->i_d.di_size ||
1594            sxp->sx_length != tip->i_d.di_size) {
1595                error = -EFAULT;
1596                goto out_trans_cancel;
1597        }
1598
1599        trace_xfs_swap_extent_before(ip, 0);
1600        trace_xfs_swap_extent_before(tip, 1);
1601
1602        /* check inode formats now that data is flushed */
1603        error = xfs_swap_extents_check_format(ip, tip);
1604        if (error) {
1605                xfs_notice(mp,
1606                    "%s: inode 0x%llx format is incompatible for exchanging.",
1607                                __func__, ip->i_ino);
1608                goto out_trans_cancel;
1609        }
1610
1611        /*
1612         * Compare the current change & modify times with those
1613         * passed in.  If they differ, we abort this swap.
1614         * This is the mechanism used to assure the calling
1615         * process that the file was not changed out from
1616         * under it.
1617         */
1618        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1619            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1620            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1621            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1622                error = -EBUSY;
1623                goto out_trans_cancel;
1624        }
1625        /*
1626         * Count the number of extended attribute blocks
1627         */
1628        if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1629            (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1630                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1631                if (error)
1632                        goto out_trans_cancel;
1633        }
1634        if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1635            (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1636                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1637                        &taforkblks);
1638                if (error)
1639                        goto out_trans_cancel;
1640        }
1641
1642        /*
1643         * Before we've swapped the forks, let's set the owners of the forks
1644         * appropriately. We have to do this as we are demand paging the btree
1645         * buffers, and so the validation done on read will expect the owner
1646         * field to be correctly set. Once we change the owners, we can swap the
1647         * inode forks.
1648         *
1649         * Note the trickiness in setting the log flags - we set the owner log
1650         * flag on the opposite inode (i.e. the inode we are setting the new
1651         * owner to be) because once we swap the forks and log that, log
1652         * recovery is going to see the fork as owned by the swapped inode,
1653         * not the pre-swapped inodes.
1654         */
1655        src_log_flags = XFS_ILOG_CORE;
1656        target_log_flags = XFS_ILOG_CORE;
1657        if (ip->i_d.di_version == 3 &&
1658            ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1659                target_log_flags |= XFS_ILOG_DOWNER;
1660                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1661                                              tip->i_ino, NULL);
1662                if (error)
1663                        goto out_trans_cancel;
1664        }
1665
1666        if (tip->i_d.di_version == 3 &&
1667            tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1668                src_log_flags |= XFS_ILOG_DOWNER;
1669                error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1670                                              ip->i_ino, NULL);
1671                if (error)
1672                        goto out_trans_cancel;
1673        }
1674
1675        /*
1676         * Swap the data forks of the inodes
1677         */
1678        ifp = &ip->i_df;
1679        tifp = &tip->i_df;
1680        *tempifp = *ifp;        /* struct copy */
1681        *ifp = *tifp;           /* struct copy */
1682        *tifp = *tempifp;       /* struct copy */
1683
1684        /*
1685         * Fix the on-disk inode values
1686         */
1687        tmp = (__uint64_t)ip->i_d.di_nblocks;
1688        ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1689        tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
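        /*
         * Illustrative numbers (hypothetical): if ip had di_nblocks = 100
         * with aforkblks = 4 and tip had di_nblocks = 80 with taforkblks = 2,
         * then afterwards ip counts 80 - 2 + 4 = 82 blocks (tip's data fork
         * plus ip's own attr fork) and tip counts 100 + 2 - 4 = 98; the
         * total stays 180.
         */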
1690
1691        tmp = (__uint64_t) ip->i_d.di_nextents;
1692        ip->i_d.di_nextents = tip->i_d.di_nextents;
1693        tip->i_d.di_nextents = tmp;
1694
1695        tmp = (__uint64_t) ip->i_d.di_format;
1696        ip->i_d.di_format = tip->i_d.di_format;
1697        tip->i_d.di_format = tmp;
1698
1699        /*
1700         * The extents in the source inode could still contain speculative
1701         * preallocation beyond EOF (e.g. the file is open but not modified
1702         * while defrag is in progress). In that case, we need to copy over the
1703         * number of delalloc blocks the data fork in the source inode is
1704         * tracking beyond EOF so that when the fork is truncated away when the
1705         * temporary inode is unlinked we don't underrun the i_delayed_blks
1706         * counter on that inode.
1707         */
1708        ASSERT(tip->i_delayed_blks == 0);
1709        tip->i_delayed_blks = ip->i_delayed_blks;
1710        ip->i_delayed_blks = 0;
1711
1712        switch (ip->i_d.di_format) {
1713        case XFS_DINODE_FMT_EXTENTS:
1714                /* If the extents fit in the inode, fix the
1715                 * pointer.  Otherwise it's already NULL or
1716                 * pointing to the extent list.
1717                 */
1718                if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1719                        ifp->if_u1.if_extents =
1720                                ifp->if_u2.if_inline_ext;
1721                }
1722                src_log_flags |= XFS_ILOG_DEXT;
1723                break;
1724        case XFS_DINODE_FMT_BTREE:
1725                ASSERT(ip->i_d.di_version < 3 ||
1726                       (src_log_flags & XFS_ILOG_DOWNER));
1727                src_log_flags |= XFS_ILOG_DBROOT;
1728                break;
1729        }
1730
1731        switch (tip->i_d.di_format) {
1732        case XFS_DINODE_FMT_EXTENTS:
1733                /* If the extents fit in the inode, fix the
1734                 * pointer.  Otherwise it's already NULL or
1735                 * pointing to the extent list.
1736                 */
1737                if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1738                        tifp->if_u1.if_extents =
1739                                tifp->if_u2.if_inline_ext;
1740                }
1741                target_log_flags |= XFS_ILOG_DEXT;
1742                break;
1743        case XFS_DINODE_FMT_BTREE:
1744                target_log_flags |= XFS_ILOG_DBROOT;
1745                ASSERT(tip->i_d.di_version < 3 ||
1746                       (target_log_flags & XFS_ILOG_DOWNER));
1747                break;
1748        }
1749
1750        xfs_trans_log_inode(tp, ip,  src_log_flags);
1751        xfs_trans_log_inode(tp, tip, target_log_flags);
1752
1753        /*
1754         * If this is a synchronous mount, make sure that the
1755         * transaction goes to disk before returning to the user.
1756         */
1757        if (mp->m_flags & XFS_MOUNT_WSYNC)
1758                xfs_trans_set_sync(tp);
1759
1760        error = xfs_trans_commit(tp);
1761
1762        trace_xfs_swap_extent_after(ip, 0);
1763        trace_xfs_swap_extent_after(tip, 1);
1764out:
1765        kmem_free(tempifp);
1766        return error;
1767
1768out_unlock:
1769        xfs_iunlock(ip, lock_flags);
1770        xfs_iunlock(tip, lock_flags);
1771        goto out;
1772
1773out_trans_cancel:
1774        xfs_trans_cancel(tp);
1775        goto out;
1776}
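/*
 * For illustration: the expected caller here is xfs_fsr via the
 * XFS_IOC_SWAPEXT ioctl. A minimal sketch with a hypothetical helper,
 * assuming <xfs/xfs.h> from xfsprogs for struct xfs_swapext and the ioctl
 * number; sx_stat must carry a bulkstat of the target taken before the swap,
 * which is what the ctime/mtime comparison above checks to detect concurrent
 * modification (the ioctl then fails with -EBUSY):
 */
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static int swap_whole_file(int target_fd, int tmp_fd,
			   const struct xfs_bstat *target_stat)
{
	struct xfs_swapext sx = {
		.sx_version  = XFS_SX_VERSION,
		.sx_fdtarget = target_fd,
		.sx_fdtmp    = tmp_fd,
		.sx_offset   = 0,			/* must be 0 */
		.sx_length   = target_stat->bs_size,	/* whole file */
		.sx_stat     = *target_stat,	/* from a prior bulkstat */
	};

	return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
}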
1777