linux/fs/xfs/xfs_iomap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

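/*
 * Round a byte offset down to the write I/O alignment granularity of
 * (1 << m_writeio_log) bytes.  As a worked example (the value below is
 * illustrative only, not a default): with m_writeio_log == 16, i.e. a
 * 64k write iosize, an offset of 0x12345 aligns down to 0x10000.
 */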
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

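/*
 * Report a mapping that points at block zero, which is never a valid
 * data block, as filesystem corruption.
 */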
static int
xfs_alert_fsblock_zero(
        xfs_inode_t     *ip,
        xfs_bmbt_irec_t *imap)
{
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
                        "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
                (unsigned long long)imap->br_blockcount,
                imap->br_state);
        return -EFSCORRUPTED;
}

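/*
 * Translate an XFS extent record (in units of filesystem blocks) into the
 * byte-based struct iomap that the generic iomap layer consumes.  Holes and
 * delalloc extents carry no disk address; real extents are converted to a
 * disk byte address.  IOMAP_F_DIRTY tells the caller that the inode still
 * has logged metadata an O_DSYNC write would need to flush.
 */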
int
xfs_bmbt_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        struct xfs_bmbt_irec    *imap,
        bool                    shared)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (unlikely(!imap->br_startblock && !XFS_IS_REALTIME_INODE(ip)))
                return xfs_alert_fsblock_zero(ip, imap);

        if (imap->br_startblock == HOLESTARTBLOCK) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_HOLE;
        } else if (imap->br_startblock == DELAYSTARTBLOCK ||
                   isnullstartblock(imap->br_startblock)) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_DELALLOC;
        } else {
                iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
                if (imap->br_state == XFS_EXT_UNWRITTEN)
                        iomap->type = IOMAP_UNWRITTEN;
                else
                        iomap->type = IOMAP_MAPPED;
        }
        iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
        iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
        iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
        iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));

        if (xfs_ipincount(ip) &&
            (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
                iomap->flags |= IOMAP_F_DIRTY;
        if (shared)
                iomap->flags |= IOMAP_F_SHARED;
        return 0;
}

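/*
 * Fill out @iomap to describe a hole spanning @offset_fsb up to @end_fsb.
 */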
static void
xfs_hole_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           end_fsb)
{
        iomap->addr = IOMAP_NULL_ADDR;
        iomap->type = IOMAP_HOLE;
        iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
        iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
        iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
        iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

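/*
 * Return the allocation alignment, in filesystem blocks, to apply when
 * extending a file past EOF: the stripe unit (or the stripe width with
 * "-o swalloc") for non-realtime inodes, rounded up to @extsize if an
 * extent size hint is in effect.  Returns 0 when no alignment is needed.
 */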
xfs_extlen_t
xfs_eof_alignment(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_extlen_t            align = 0;

        if (!XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Round up the allocation request to a stripe unit
                 * (m_dalign) boundary if the file size is >= stripe unit
                 * size, and we are allocating past the allocation eof.
                 *
                 * If mounted with the "-o swalloc" option the alignment is
                 * increased from the stripe unit size to the stripe width.
                 */
                if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
                        align = mp->m_swidth;
                else if (mp->m_dalign)
                        align = mp->m_dalign;

                if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
                        align = 0;
        }

        /*
         * Always round up the allocation request to an extent boundary
         * (when the file is on a real-time subvolume or has a di_extsize
         * hint).
         */
        if (extsize) {
                if (align)
                        align = roundup_64(align, extsize);
                else
                        align = extsize;
        }

        return align;
}

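/*
 * Round *last_fsb up to the EOF allocation alignment, but only adopt the
 * aligned value if it still lies beyond the last extent in the data fork,
 * so we never align an allocation into the middle of existing blocks.
 */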
STATIC int
xfs_iomap_eof_align_last_fsb(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize,
        xfs_fileoff_t           *last_fsb)
{
        xfs_extlen_t            align = xfs_eof_alignment(ip, extsize);

        if (align) {
                xfs_fileoff_t   new_last_fsb = roundup_64(*last_fsb, align);
                int             eof, error;

                error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
                if (error)
                        return error;
                if (eof)
                        *last_fsb = new_last_fsb;
        }
        return 0;
}

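/*
 * Allocate real blocks for a direct I/O or DAX write.  Entered with the
 * ilock held shared by the caller; the lock is cycled to exclusive around
 * the allocation transaction and released before returning.  On success
 * *imap is overwritten with the newly allocated extent.
 */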
int
xfs_iomap_write_direct(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        size_t          count,
        xfs_bmbt_irec_t *imap,
        int             nmaps)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   count_fsb, resaligned;
        xfs_extlen_t    extsz;
        int             nimaps;
        int             quota_flag;
        int             rt;
        xfs_trans_t     *tp;
        uint            qblocks, resblks, resrtextents;
        int             error;
        int             lockmode;
        int             bmapi_flags = XFS_BMAPI_PREALLOC;
        uint            tflags = 0;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);
        lockmode = XFS_ILOCK_SHARED;    /* locked by caller */

        ASSERT(xfs_isilocked(ip, lockmode));

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
        if ((offset + count) > XFS_ISIZE(ip)) {
                /*
                 * Assert that the in-core extent list is present since this can
                 * call xfs_iread_extents() and we only have the ilock shared.
                 * This should be safe because the lock was held around a bmapi
                 * call in the caller and we only need it to access the in-core
                 * list.
                 */
                ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
                                                                XFS_IFEXTENTS);
                error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
                if (error)
                        goto out_unlock;
        } else {
                if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
                        last_fsb = min(last_fsb, (xfs_fileoff_t)
                                        imap->br_blockcount +
                                        imap->br_startoff);
        }
        count_fsb = last_fsb - offset_fsb;
        ASSERT(count_fsb > 0);
        resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

        if (unlikely(rt)) {
                resrtextents = qblocks = resaligned;
                resrtextents /= mp->m_sb.sb_rextsize;
                resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                quota_flag = XFS_QMOPT_RES_RTBLKS;
        } else {
                resrtextents = 0;
                resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
                quota_flag = XFS_QMOPT_RES_REGBLKS;
        }

        /*
         * Drop the shared lock acquired by the caller, attach the dquot if
         * necessary and move on to transaction setup.
         */
        xfs_iunlock(ip, lockmode);
        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        /*
         * For DAX, we do not allocate unwritten extents, but instead we zero
         * the block before we commit the transaction.  Ideally we'd like to do
         * this outside the transaction context, but if we commit and then crash
         * we may not have zeroed the blocks and this will be exposed on
         * recovery of the allocation. Hence we must zero before commit.
         *
         * Further, if we are mapping unwritten extents here, we need to zero
         * and convert them to written so that we don't need an unwritten extent
         * callback for DAX. This also means that we need to be able to dip into
         * the reserve block pool for bmbt block allocation if there is no space
         * left but we need to do unwritten extent conversion.
         */
        if (IS_DAX(VFS_I(ip))) {
                bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
                if (imap->br_state == XFS_EXT_UNWRITTEN) {
                        tflags |= XFS_TRANS_RESERVE;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
                }
        }
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
                        tflags, &tp);
        if (error)
                return error;

        lockmode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockmode);

        error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        /*
         * From this point onwards we overwrite the imap pointer that the
         * caller gave to us.
         */
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                bmapi_flags, resblks, imap, &nimaps);
        if (error)
                goto out_res_cancel;

        /*
         * Complete the transaction.
         */
        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        /*
         * Copy any maps to caller's array and return any error.
         */
        if (nimaps == 0) {
                error = -ENOSPC;
                goto out_unlock;
        }

        if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
                error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;

out_res_cancel:
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

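/*
 * Decide whether speculative preallocation must be throttled against the
 * given quota type: true once the reservation would cross the quota's low
 * watermark.
 */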
STATIC bool
xfs_quota_need_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t alloc_blocks)
{
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        if (!dq || !xfs_this_quota_on(ip->i_mount, type))
                return false;

        /* no hi watermark, no throttle */
        if (!dq->q_prealloc_hi_wmark)
                return false;

        /* under the lo watermark, no throttle */
        if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
                return false;

        return true;
}

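/*
 * Compute the throttle contribution of one quota: squash the prealloc
 * entirely if the quota is at or over its high watermark, otherwise derive
 * a shift from how close it is to the limit and keep the more aggressive
 * of the quota and incoming values in *qblocks/*qshift.  *qfreesp is
 * clamped to the quota's remaining space.
 */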
STATIC void
xfs_quota_calc_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t *qblocks,
        int *qshift,
        int64_t *qfreesp)
{
        int64_t freesp;
        int shift = 0;
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        /* no dq, or over hi wmark, squash the prealloc completely */
        if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
                *qblocks = 0;
                *qfreesp = 0;
                return;
        }

        freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
        if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
                        shift += 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
                        shift += 2;
        }

        if (freesp < *qfreesp)
                *qfreesp = freesp;

        /* only overwrite the throttle values if we are more aggressive */
        if ((freesp >> shift) < (*qblocks >> *qshift)) {
                *qblocks = freesp;
                *qshift = shift;
        }
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file that
 * is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
        struct xfs_inode        *ip,
        int                     whichfork,
        loff_t                  offset,
        loff_t                  count,
        struct xfs_iext_cursor  *icur)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        struct xfs_bmbt_irec    prev;
        int                     shift = 0;
        int64_t                 freesp;
        xfs_fsblock_t           qblocks;
        int                     qshift = 0;
        xfs_fsblock_t           alloc_blocks = 0;

        if (offset + count <= XFS_ISIZE(ip))
                return 0;

        if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
            (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
                return 0;

        /*
         * If an explicit allocsize is set, the file is small, or we
         * are writing behind a hole, then use the minimum prealloc.
         */
        if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
            XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
            !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
            prev.br_startoff + prev.br_blockcount < offset_fsb)
                return mp->m_writeio_blocks;

        /*
         * Determine the initial size of the preallocation. We are beyond the
         * current EOF here, but we need to take into account whether this is
         * a sparse write or an extending write when determining the
         * preallocation size.  Hence we need to look up the extent that ends
         * at the current write offset and use the result to determine the
         * preallocation size.
         *
         * If the extent is a hole, then preallocation is essentially disabled.
         * Otherwise we take the size of the preceding data extent as the basis
         * for the preallocation size. If the size of the extent is greater than
         * half the maximum extent length, then use the current offset as the
         * basis. This ensures that for large files the preallocation size
         * always extends to MAXEXTLEN rather than falling short due to things
         * like stripe unit/width alignment of real extents.
         */
        if (prev.br_blockcount <= (MAXEXTLEN >> 1))
                alloc_blocks = prev.br_blockcount << 1;
        else
                alloc_blocks = XFS_B_TO_FSB(mp, offset);
        if (!alloc_blocks)
                goto check_writeio;
        qblocks = alloc_blocks;

        /*
         * MAXEXTLEN is not a power of two value but we round the prealloc down
         * to the nearest power of two value after throttling. To prevent the
         * round down from unconditionally reducing the maximum supported
         * prealloc size, we round up first, apply appropriate throttling,
         * round down and cap the value to MAXEXTLEN.
         */
        alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
                                       alloc_blocks);

        freesp = percpu_counter_read_positive(&mp->m_fdblocks);
        if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
                        shift++;
        }

        /*
         * Check each quota to cap the prealloc size, provide a shift value to
         * throttle with and adjust amount of available space.
         */
        if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
                                        &freesp);

        /*
         * The final prealloc size is set to the minimum of free space available
         * in each of the quotas and the overall filesystem.
         *
         * The shift throttle value is set to the maximum value as determined by
         * the global low free space values and per-quota low free space values.
         */
        alloc_blocks = min(alloc_blocks, qblocks);
        shift = max(shift, qshift);

        if (shift)
                alloc_blocks >>= shift;
        /*
         * rounddown_pow_of_two() returns an undefined result if we pass in
         * alloc_blocks = 0.
         */
        if (alloc_blocks)
                alloc_blocks = rounddown_pow_of_two(alloc_blocks);
        if (alloc_blocks > MAXEXTLEN)
                alloc_blocks = MAXEXTLEN;

        /*
         * If we are still trying to allocate more space than is
         * available, squash the prealloc hard. This can happen if we
         * have a large file on a small filesystem and the above
         * lowspace thresholds are smaller than MAXEXTLEN.
         */
        while (alloc_blocks && alloc_blocks >= freesp)
                alloc_blocks >>= 4;
check_writeio:
        if (alloc_blocks < mp->m_writeio_blocks)
                alloc_blocks = mp->m_writeio_blocks;
        trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
                                      mp->m_writeio_blocks);
        return alloc_blocks;
}

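/*
 * iomap_begin for buffered writes: find the extent covering the write, or
 * reserve delalloc blocks (plus speculative EOF preallocation) if none
 * exists yet.  Handles both the data fork and, for reflink files, the
 * COW fork.
 */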
static int
xfs_file_iomap_begin_delay(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  count,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           maxbytes_fsb =
                XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        xfs_fileoff_t           end_fsb;
        struct xfs_bmbt_irec    imap, cmap;
        struct xfs_iext_cursor  icur, ccur;
        xfs_fsblock_t           prealloc_blocks = 0;
        bool                    eof = false, cow_eof = false, shared = false;
        int                     whichfork = XFS_DATA_FORK;
        int                     error = 0;

        ASSERT(!XFS_IS_REALTIME_INODE(ip));
        ASSERT(!xfs_get_extsz_hint(ip));

        xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (unlikely(XFS_TEST_ERROR(
            (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
             XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
             mp, XFS_ERRTAG_BMAPIFORMAT))) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                error = -EFSCORRUPTED;
                goto out_unlock;
        }

        XFS_STATS_INC(mp, xs_blk_mapw);

        if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
                if (error)
                        goto out_unlock;
        }

        end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

        /*
         * Search the data fork first to look up our source mapping.  We
         * always need the data fork map, as we have to return it to the
         * iomap code so that the higher level write code can read data in to
         * perform read-modify-write cycles for unaligned writes.
         */
        eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
        if (eof)
                imap.br_startoff = end_fsb; /* fake hole until the end */

        /* We never need to allocate blocks for zeroing a hole. */
        if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
                xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
                goto out_unlock;
        }

        /*
         * Search the COW fork extent list even if we did not find a data fork
         * extent.  This serves two purposes: first this implements the
         * speculative preallocation using cowextsize, so that we also unshare
         * blocks adjacent to shared blocks instead of just the shared blocks
         * themselves.  Second the lookup in the extent list is generally faster
         * than going out to the shared extent tree.
         */
        if (xfs_is_cow_inode(ip)) {
                if (!ip->i_cowfp) {
                        ASSERT(!xfs_is_reflink_inode(ip));
                        xfs_ifork_init_cow(ip);
                }
                cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
                                &ccur, &cmap);
                if (!cow_eof && cmap.br_startoff <= offset_fsb) {
                        trace_xfs_reflink_cow_found(ip, &cmap);
                        whichfork = XFS_COW_FORK;
                        goto done;
                }
        }

        if (imap.br_startoff <= offset_fsb) {
                /*
                 * For reflink files we may need a delalloc reservation when
                 * overwriting shared extents.  This includes zeroing of
                 * existing extents that contain data.
                 */
                if (!xfs_is_cow_inode(ip) ||
                    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
                        trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
                                        &imap);
                        goto done;
                }

                xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

                /* Trim the mapping to the nearest shared extent boundary. */
                error = xfs_inode_need_cow(ip, &imap, &shared);
                if (error)
                        goto out_unlock;

                /* Not shared?  Just report the (potentially capped) extent. */
                if (!shared) {
                        trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
                                        &imap);
                        goto done;
                }

                /*
                 * Fork all the shared blocks from our write offset until the
                 * end of the extent.
                 */
                whichfork = XFS_COW_FORK;
                end_fsb = imap.br_startoff + imap.br_blockcount;
        } else {
                /*
                 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
                 * pages to keep the chunks of work done here somewhat
                 * symmetric with the work writeback does.  This is a completely
                 * arbitrary number pulled out of thin air.
                 *
                 * Note that the value needs to be less than 32 bits wide until
                 * the lower level functions are updated.
                 */
                count = min_t(loff_t, count, 1024 * PAGE_SIZE);
                end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

                if (xfs_is_always_cow_inode(ip))
                        whichfork = XFS_COW_FORK;
        }

        error = xfs_qm_dqattach_locked(ip, false);
        if (error)
                goto out_unlock;

        if (eof) {
                prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
                                count, &icur);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
                        xfs_fileoff_t   p_end_fsb;

                        end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
                        p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
                                        prealloc_blocks;

                        align = xfs_eof_alignment(ip, 0);
                        if (align)
                                p_end_fsb = roundup_64(p_end_fsb, align);

                        p_end_fsb = min(p_end_fsb, maxbytes_fsb);
                        ASSERT(p_end_fsb > offset_fsb);
                        prealloc_blocks = p_end_fsb - end_fsb;
                }
        }

retry:
        error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
                        end_fsb - offset_fsb, prealloc_blocks,
                        whichfork == XFS_DATA_FORK ? &imap : &cmap,
                        whichfork == XFS_DATA_FORK ? &icur : &ccur,
                        whichfork == XFS_DATA_FORK ? eof : cow_eof);
        switch (error) {
        case 0:
                break;
        case -ENOSPC:
        case -EDQUOT:
                /* retry without any preallocation */
                trace_xfs_delalloc_enospc(ip, offset, count);
                if (prealloc_blocks) {
                        prealloc_blocks = 0;
                        goto retry;
                }
                /*FALLTHRU*/
        default:
                goto out_unlock;
        }

        /*
         * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
         * them out if the write happens to fail.
         */
        iomap->flags |= IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, count, whichfork,
                        whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
        if (whichfork == XFS_COW_FORK) {
                if (imap.br_startoff > offset_fsb) {
                        xfs_trim_extent(&cmap, offset_fsb,
                                        imap.br_startoff - offset_fsb);
                        error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
                        goto out_unlock;
                }
                /* ensure we only report blocks we have a reservation for */
                xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
                shared = true;
        }
        error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

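/*
 * Convert the unwritten extents backing a byte range to written, one extent
 * per transaction, optionally pushing the on-disk file size forward as each
 * conversion commits.
 */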
int
xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       count,
        bool            update_isize)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_filblks_t   count_fsb;
        xfs_filblks_t   numblks_fsb;
        int             nimaps;
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        struct inode    *inode = VFS_I(ip);
        xfs_fsize_t     i_size;
        uint            resblks;
        int             error;

        trace_xfs_unwritten_convert(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

        /*
         * Reserve enough blocks in this transaction for two complete extent
         * btree splits.  We may be converting the middle part of an unwritten
         * extent and in this case we will insert two new extents in the btree
         * each of which could cause a full split.
         *
         * This reservation amount will be used in the first call to
         * xfs_bmbt_split() to select an AG with enough space to satisfy the
         * rest of the operation.
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

        /* Attach dquots so that bmbt splits are accounted correctly. */
        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        do {
                /*
                 * Set up a transaction to convert the range of extents
                 * from unwritten to real. Do allocations in a loop until
                 * we have covered the range passed in.
                 *
                 * Note that we can't risk recursing back into the filesystem
                 * here as we might be asked to write out the same inode that we
                 * complete here and might deadlock on the iolock.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
                                XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
                if (error)
                        return error;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Modify the unwritten extent state of the buffer.
                 */
                nimaps = 1;
                error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                        XFS_BMAPI_CONVERT, resblks, &imap,
                                        &nimaps);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Log the updated inode size as we go.  We have to be careful
                 * to only log it up to the actual write offset if it is
                 * halfway into a block.
                 */
                i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                if (i_size > offset + count)
                        i_size = offset + count;
                if (update_isize && i_size > i_size_read(inode))
                        i_size_write(inode, i_size);
                i_size = xfs_new_eof(ip, i_size);
                if (i_size) {
                        ip->i_d.di_size = i_size;
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                }

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_alert_fsblock_zero(ip, &imap);

                if ((numblks_fsb = imap.br_blockcount) == 0) {
                        /*
                         * The numblks_fsb value should always get
                         * smaller, otherwise the loop is stuck.
                         */
                        ASSERT(imap.br_blockcount);
                        break;
                }
                offset_fsb += numblks_fsb;
                count_fsb -= numblks_fsb;
        } while (count_fsb > 0);

        return 0;

error_on_bmapi_transaction:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

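/*
 * Does the caller have to allocate (or, for DAX, convert) blocks before
 * this mapping can be written to?
 */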
static inline bool
imap_needs_alloc(
        struct inode            *inode,
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
{
        return !nimaps ||
                imap->br_startblock == HOLESTARTBLOCK ||
                imap->br_startblock == DELAYSTARTBLOCK ||
                (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

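/*
 * Zeroing only has to COW an extent that contains live, written data;
 * holes and unwritten extents already read back as zeroes.
 */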
static inline bool
needs_cow_for_zeroing(
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
{
        return nimaps &&
                imap->br_startblock != HOLESTARTBLOCK &&
                imap->br_state != XFS_EXT_UNWRITTEN;
}

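/*
 * Take the ilock in the lightest mode that is safe for this operation,
 * honouring IOMAP_NOWAIT, and retry with the exclusive lock if the inode
 * gained the COW/reflink state after our unlocked checks.
 */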
static int
xfs_ilock_for_iomap(
        struct xfs_inode        *ip,
        unsigned                flags,
        unsigned                *lockmode)
{
        unsigned                mode = XFS_ILOCK_SHARED;
        bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
        if (xfs_is_cow_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
                 */
                if (flags & IOMAP_NOWAIT)
                        return -EAGAIN;
                mode = XFS_ILOCK_EXCL;
        }

        /*
         * Extents not yet cached require exclusive access; don't block.  This
         * is an opencoded xfs_ilock_data_map_shared() call but with
         * non-blocking behaviour.
         */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
                if (flags & IOMAP_NOWAIT)
                        return -EAGAIN;
                mode = XFS_ILOCK_EXCL;
        }

relock:
        if (flags & IOMAP_NOWAIT) {
                if (!xfs_ilock_nowait(ip, mode))
                        return -EAGAIN;
        } else {
                xfs_ilock(ip, mode);
        }

        /*
         * The reflink iflag could have changed since the earlier unlocked
         * check, so if we got ILOCK_SHARED for a write but we're now a
         * reflink inode we have to switch to ILOCK_EXCL and relock.
         */
        if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
                xfs_iunlock(ip, mode);
                mode = XFS_ILOCK_EXCL;
                goto relock;
        }

        *lockmode = mode;
        return 0;
}

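/*
 * Main iomap_begin entry point for data I/O.  Buffered writes without an
 * extent size hint are handed off to the delalloc path; all other cases
 * read the current mapping and, for writes, break shared extents and
 * allocate real blocks as needed.
 */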
static int
xfs_file_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     nimaps = 1, error = 0;
        bool                    shared = false;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
                        !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
                /* Reserve delalloc blocks for regular writeback. */
                return xfs_file_iomap_begin_delay(inode, offset, length, flags,
                                iomap);
        }

        /*
         * Lock the inode in the manner required for the specified operation and
         * check for as many conditions that would result in blocking as
         * possible. This removes most of the non-blocking checks from the
         * mapping code below.
         */
        error = xfs_ilock_for_iomap(ip, flags, &lockmode);
        if (error)
                return error;

        ASSERT(offset <= mp->m_super->s_maxbytes);
        if (offset > mp->m_super->s_maxbytes - length)
                length = mp->m_super->s_maxbytes - offset;
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
        if (error)
                goto out_unlock;

        if (flags & IOMAP_REPORT) {
                /* Trim the mapping to the nearest shared extent boundary. */
                error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
                if (error)
                        goto out_unlock;
        }

        /* Non-modifying mapping requested, so we are done */
        if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
                goto out_found;

        /*
         * Break shared extents if necessary. Checks for non-blocking IO have
         * been done up front, so we don't need to do them here.
         */
        if (xfs_is_cow_inode(ip)) {
                struct xfs_bmbt_irec    cmap;
                bool                    directio = (flags & IOMAP_DIRECT);

                /* if zeroing doesn't need COW allocation, then we are done. */
                if ((flags & IOMAP_ZERO) &&
                    !needs_cow_for_zeroing(&imap, nimaps))
                        goto out_found;

                /* may drop and re-acquire the ilock */
                cmap = imap;
                error = xfs_reflink_allocate_cow(ip, &cmap, &shared, &lockmode,
                                directio);
                if (error)
                        goto out_unlock;

                /*
                 * For buffered writes we need to report the address of the
                 * previous block (if there was any) so that the higher level
                 * write code can perform read-modify-write operations; we
                 * won't need the CoW fork mapping until writeback.  For direct
                 * I/O, which must be block aligned, we need to report the
                 * newly allocated address.  If the data fork has a hole, copy
                 * the COW fork mapping to avoid allocating to the data fork.
                 */
                if (directio || imap.br_startblock == HOLESTARTBLOCK)
                        imap = cmap;

                end_fsb = imap.br_startoff + imap.br_blockcount;
                length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }

        /* Don't need to allocate over holes when doing zeroing operations. */
        if (flags & IOMAP_ZERO)
                goto out_found;

        if (!imap_needs_alloc(inode, &imap, nimaps))
                goto out_found;

        /* If nowait is set bail since we are going to make allocations. */
        if (flags & IOMAP_NOWAIT) {
                error = -EAGAIN;
                goto out_unlock;
        }

        /*
         * We cap the maximum length we map to a sane size to keep the chunks
         * of work done here somewhat symmetric with the work writeback does.
         * This is a completely arbitrary number pulled out of thin air as a
         * best guess for initial testing.
         *
         * Note that the value needs to be less than 32 bits wide until the
         * lower level functions are updated.
         */
        length = min_t(loff_t, length, 1024 * PAGE_SIZE);

        /*
         * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
         * return.
         */
        if (lockmode == XFS_ILOCK_EXCL)
                xfs_ilock_demote(ip, lockmode);
        error = xfs_iomap_write_direct(ip, offset, length, &imap,
                        nimaps);
        if (error)
                return error;

        iomap->flags |= IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);

out_finish:
        return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);

out_found:
        ASSERT(nimaps);
        xfs_iunlock(ip, lockmode);
        trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
        goto out_finish;

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

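/*
 * Release delalloc blocks that this write reserved but did not consume
 * (because the write failed or came up short), together with the page
 * cache over them, so stale reservations don't linger.
 */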
static int
xfs_file_iomap_end_delalloc(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        struct iomap            *iomap)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error = 0;

        /*
         * Behave as if the write failed if drop writes is enabled. Set the NEW
         * flag to force delalloc cleanup.
         */
        if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
                iomap->flags |= IOMAP_F_NEW;
                written = 0;
        }

        /*
         * start_fsb refers to the first unused block after a short write. If
         * nothing was written, round offset down to point at the first block in
         * the range.
         */
        if (unlikely(!written))
                start_fsb = XFS_B_TO_FSBT(mp, offset);
        else
                start_fsb = XFS_B_TO_FSB(mp, offset + written);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        /*
         * Trim delalloc blocks if they were allocated by this write and we
         * didn't manage to write the whole range.
         *
         * We don't need to care about racing delalloc as we hold i_mutex
         * across the reserve/allocate/unreserve calls. If there are delalloc
         * blocks in the range, they are ours.
         */
        if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
                truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
                                         XFS_FSB_TO_B(mp, end_fsb) - 1);

                error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
                                               end_fsb - start_fsb);
                if (error && !XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_alert(mp, "%s: unable to clean up ino %lld",
                                __func__, ip->i_ino);
                        return error;
                }
        }

        return 0;
}

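/*
 * iomap_end: only buffered writes over delalloc mappings need any
 * post-write cleanup.
 */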
static int
xfs_file_iomap_end(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        unsigned                flags,
        struct iomap            *iomap)
{
        if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
                return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
                                length, written, iomap);
        return 0;
}

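/*
 * These ops are handed to the generic iomap layer by the read/write paths
 * in xfs_file.c.  As a rough usage sketch for this kernel era (the call
 * site and end_io callback name below are illustrative, not verbatim):
 *
 *	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
 *
 * for direct I/O, with dax_iomap_rw() and iomap_file_buffered_write()
 * playing the same role for DAX and buffered writes respectively.
 */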
const struct iomap_ops xfs_iomap_ops = {
        .iomap_begin            = xfs_file_iomap_begin,
        .iomap_end              = xfs_file_iomap_end,
};

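/*
 * iomap_begin used by the SEEK_HOLE/SEEK_DATA path (via the generic
 * iomap_seek_hole()/iomap_seek_data() helpers).  Data extents are reported
 * directly; COW fork extents over a data fork hole are reported as
 * unwritten so the generic code probes the page cache for dirty data;
 * everything else is a hole capped at the next known extent.
 */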
static int
xfs_seek_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        xfs_fileoff_t           cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    imap, cmap;
        int                     error = 0;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        lockmode = xfs_ilock_data_map_shared(ip);
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
                if (error)
                        goto out_unlock;
        }

        if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
                /*
                 * If we found a data extent we are done.
                 */
                if (imap.br_startoff <= offset_fsb)
                        goto done;
                data_fsb = imap.br_startoff;
        } else {
                /*
                 * Fake a hole until the end of the file.
                 */
                data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
                               XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
        }

        /*
         * If a COW fork extent covers the hole, report it, capped to the next
         * data fork extent.
         */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
                cow_fsb = cmap.br_startoff;
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                if (data_fsb < cow_fsb + cmap.br_blockcount)
                        end_fsb = min(end_fsb, data_fsb);
                xfs_trim_extent(&cmap, offset_fsb, end_fsb);
                error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
                /*
                 * This is a COW extent, so we must probe the page cache
                 * because there could be dirty page cache backed by this
                 * extent.
                 */
                iomap->type = IOMAP_UNWRITTEN;
                goto out_unlock;
        }

        /*
         * Else report a hole, capped to the next found data or COW extent.
         */
        if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
                imap.br_blockcount = cow_fsb - offset_fsb;
        else
                imap.br_blockcount = data_fsb - offset_fsb;
        imap.br_startoff = offset_fsb;
        imap.br_startblock = HOLESTARTBLOCK;
        imap.br_state = XFS_EXT_NORM;
done:
        xfs_trim_extent(&imap, offset_fsb, end_fsb);
        error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
        .iomap_begin            = xfs_seek_iomap_begin,
};

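/*
 * iomap_begin for the attribute fork: a read-only mapping lookup (used by
 * the FIEMAP attribute-fork reporting path in this kernel era), returning
 * -ENOENT if the inode has no attribute fork or no attribute extents.
 */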
static int
xfs_xattr_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        struct xfs_bmbt_irec    imap;
        int                     nimaps = 1, error = 0;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        lockmode = xfs_ilock_attr_map_shared(ip);

        /* if there is no attribute fork or no extents, return ENOENT */
        if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
                error = -ENOENT;
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
        xfs_iunlock(ip, lockmode);

        if (error)
                return error;
        ASSERT(nimaps);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
};