linux/fs/xfs/xfs_aops.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_log.h"
  20#include "xfs_sb.h"
  21#include "xfs_ag.h"
  22#include "xfs_trans.h"
  23#include "xfs_mount.h"
  24#include "xfs_bmap_btree.h"
  25#include "xfs_dinode.h"
  26#include "xfs_inode.h"
  27#include "xfs_inode_item.h"
  28#include "xfs_alloc.h"
  29#include "xfs_error.h"
  30#include "xfs_iomap.h"
  31#include "xfs_vnodeops.h"
  32#include "xfs_trace.h"
  33#include "xfs_bmap.h"
  34#include <linux/aio.h>
  35#include <linux/gfp.h>
  36#include <linux/mpage.h>
  37#include <linux/pagevec.h>
  38#include <linux/writeback.h>
  39
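    /*
     * Walk the buffers attached to a page and report whether any of them
     * are in the delalloc or unwritten state.
     */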
  40void
  41xfs_count_page_state(
  42        struct page             *page,
  43        int                     *delalloc,
  44        int                     *unwritten)
  45{
  46        struct buffer_head      *bh, *head;
  47
  48        *delalloc = *unwritten = 0;
  49
  50        bh = head = page_buffers(page);
  51        do {
  52                if (buffer_unwritten(bh))
  53                        (*unwritten) = 1;
  54                else if (buffer_delay(bh))
  55                        (*delalloc) = 1;
  56        } while ((bh = bh->b_this_page) != head);
  57}
  58
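    /*
     * Return the block device backing this inode's data: the realtime device
     * for realtime inodes, the data device for everything else.
     */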
  59STATIC struct block_device *
  60xfs_find_bdev_for_inode(
  61        struct inode            *inode)
  62{
  63        struct xfs_inode        *ip = XFS_I(inode);
  64        struct xfs_mount        *mp = ip->i_mount;
  65
  66        if (XFS_IS_REALTIME_INODE(ip))
  67                return mp->m_rtdev_targp->bt_bdev;
  68        else
  69                return mp->m_ddev_targp->bt_bdev;
  70}
  71
  72/*
  73 * We're now finished for good with this ioend structure.
  74 * Update the page state via the associated buffer_heads,
  75 * release holds on the inode and bio, and finally free
  76 * up memory.  Do not use the ioend after this.
  77 */
  78STATIC void
  79xfs_destroy_ioend(
  80        xfs_ioend_t             *ioend)
  81{
  82        struct buffer_head      *bh, *next;
  83
  84        for (bh = ioend->io_buffer_head; bh; bh = next) {
  85                next = bh->b_private;
  86                bh->b_end_io(bh, !ioend->io_error);
  87        }
  88
  89        if (ioend->io_iocb) {
  90                inode_dio_done(ioend->io_inode);
  91                if (ioend->io_isasync) {
  92                        aio_complete(ioend->io_iocb, ioend->io_error ?
  93                                        ioend->io_error : ioend->io_result, 0);
  94                }
  95        }
  96
  97        mempool_free(ioend, xfs_ioend_pool);
  98}
  99
 100/*
 101 * Fast and loose check if this write could update the on-disk inode size.
 102 */
 103static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 104{
 105        return ioend->io_offset + ioend->io_size >
 106                XFS_I(ioend->io_inode)->i_d.di_size;
 107}
 108
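    /*
     * Allocate and reserve a transaction for updating the on-disk inode size
     * once the I/O described by the ioend completes.  The transaction is
     * attached to the ioend and committed (or cancelled) at completion time.
     */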
 109STATIC int
 110xfs_setfilesize_trans_alloc(
 111        struct xfs_ioend        *ioend)
 112{
 113        struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
 114        struct xfs_trans        *tp;
 115        int                     error;
 116
 117        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
 118
 119        error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
 120        if (error) {
 121                xfs_trans_cancel(tp, 0);
 122                return error;
 123        }
 124
 125        ioend->io_append_trans = tp;
 126
 127        /*
 128         * We may pass freeze protection with a transaction.  So tell lockdep
 129         * we released it.
 130         */
 131        rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
 132                      1, _THIS_IP_);
 133        /*
 134         * We hand off the transaction to the completion thread now, so
 135         * clear the flag here.
 136         */
 137        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 138        return 0;
 139}
 140
 141/*
 142 * Update on-disk file size now that data has been written to disk.
 143 */
 144STATIC int
 145xfs_setfilesize(
 146        struct xfs_ioend        *ioend)
 147{
 148        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 149        struct xfs_trans        *tp = ioend->io_append_trans;
 150        xfs_fsize_t             isize;
 151
 152        /*
 153         * The transaction may have been allocated in the I/O submission thread,
 154         * thus we need to mark ourselves as being in a transaction manually.
 155         * Similarly for freeze protection.
 156         */
 157        current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 158        rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
 159                           0, 1, _THIS_IP_);
 160
 161        xfs_ilock(ip, XFS_ILOCK_EXCL);
 162        isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
 163        if (!isize) {
 164                xfs_iunlock(ip, XFS_ILOCK_EXCL);
 165                xfs_trans_cancel(tp, 0);
 166                return 0;
 167        }
 168
 169        trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
 170
 171        ip->i_d.di_size = isize;
 172        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 173        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 174
 175        return xfs_trans_commit(tp, 0);
 176}
 177
 178/*
 179 * Schedule IO completion handling on the final put of an ioend.
 180 *
 181 * If there is no work to do we might as well call it a day and free the
 182 * ioend right now.
 183 */
 184STATIC void
 185xfs_finish_ioend(
 186        struct xfs_ioend        *ioend)
 187{
 188        if (atomic_dec_and_test(&ioend->io_remaining)) {
 189                struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
 190
 191                if (ioend->io_type == XFS_IO_UNWRITTEN)
 192                        queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
 193                else if (ioend->io_append_trans ||
 194                         (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
 195                        queue_work(mp->m_data_workqueue, &ioend->io_work);
 196                else
 197                        xfs_destroy_ioend(ioend);
 198        }
 199}
 200
 201/*
 202 * IO write completion.
 203 */
 204STATIC void
 205xfs_end_io(
 206        struct work_struct *work)
 207{
 208        xfs_ioend_t     *ioend = container_of(work, xfs_ioend_t, io_work);
 209        struct xfs_inode *ip = XFS_I(ioend->io_inode);
 210        int             error = 0;
 211
 212        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 213                ioend->io_error = -EIO;
 214                goto done;
 215        }
 216        if (ioend->io_error)
 217                goto done;
 218
 219        /*
 220         * For unwritten extents we need to issue transactions to convert a
 221         * range to normal written extents after the data I/O has finished.
 222         */
 223        if (ioend->io_type == XFS_IO_UNWRITTEN) {
 224                error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
 225                                                  ioend->io_size);
 226        } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
 227                /*
 228                 * For direct I/O we do not know if we need to allocate blocks
 229                 * or not so we can't preallocate an append transaction as that
 230                 * results in nested reservations and log space deadlocks. Hence
 231                 * allocate the transaction here. While this is sub-optimal and
 232                 * can block IO completion for some time, we're stuck with doing
 233                 * it this way until we can pass the ioend to the direct IO
 234                 * allocation callbacks and avoid nesting that way.
 235                 */
 236                error = xfs_setfilesize_trans_alloc(ioend);
 237                if (error)
 238                        goto done;
 239                error = xfs_setfilesize(ioend);
 240        } else if (ioend->io_append_trans) {
 241                error = xfs_setfilesize(ioend);
 242        } else {
 243                ASSERT(!xfs_ioend_is_append(ioend));
 244        }
 245
 246done:
 247        if (error)
 248                ioend->io_error = -error;
 249        xfs_destroy_ioend(ioend);
 250}
 251
 252/*
 253 * Call IO completion handling in caller context on the final put of an ioend.
 254 */
 255STATIC void
 256xfs_finish_ioend_sync(
 257        struct xfs_ioend        *ioend)
 258{
 259        if (atomic_dec_and_test(&ioend->io_remaining))
 260                xfs_end_io(&ioend->io_work);
 261}
 262
 263/*
 264 * Allocate and initialise an IO completion structure.
 265 * We need to track unwritten extent write completion here initially.
 266 * We'll need to extend this for updating the ondisk inode size later
 267 * (vs. incore size).
 268 */
 269STATIC xfs_ioend_t *
 270xfs_alloc_ioend(
 271        struct inode            *inode,
 272        unsigned int            type)
 273{
 274        xfs_ioend_t             *ioend;
 275
 276        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
 277
 278        /*
 279         * Set the count to 1 initially, which will prevent an I/O
 280         * completion callback from being run before we have started
 281         * all the I/O, i.e. from calling the completion routine too early.
 282         */
 283        atomic_set(&ioend->io_remaining, 1);
 284        ioend->io_isasync = 0;
 285        ioend->io_isdirect = 0;
 286        ioend->io_error = 0;
 287        ioend->io_list = NULL;
 288        ioend->io_type = type;
 289        ioend->io_inode = inode;
 290        ioend->io_buffer_head = NULL;
 291        ioend->io_buffer_tail = NULL;
 292        ioend->io_offset = 0;
 293        ioend->io_size = 0;
 294        ioend->io_iocb = NULL;
 295        ioend->io_result = 0;
 296        ioend->io_append_trans = NULL;
 297
 298        INIT_WORK(&ioend->io_work, xfs_end_io);
 299        return ioend;
 300}
 301
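    /*
     * Map the range starting at @offset to an extent for writeback.  If the
     * range is delalloc and has no real blocks behind it yet, allocate them
     * here; otherwise return the existing mapping read from the extent tree.
     */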
 302STATIC int
 303xfs_map_blocks(
 304        struct inode            *inode,
 305        loff_t                  offset,
 306        struct xfs_bmbt_irec    *imap,
 307        int                     type,
 308        int                     nonblocking)
 309{
 310        struct xfs_inode        *ip = XFS_I(inode);
 311        struct xfs_mount        *mp = ip->i_mount;
 312        ssize_t                 count = 1 << inode->i_blkbits;
 313        xfs_fileoff_t           offset_fsb, end_fsb;
 314        int                     error = 0;
 315        int                     bmapi_flags = XFS_BMAPI_ENTIRE;
 316        int                     nimaps = 1;
 317
 318        if (XFS_FORCED_SHUTDOWN(mp))
 319                return -XFS_ERROR(EIO);
 320
 321        if (type == XFS_IO_UNWRITTEN)
 322                bmapi_flags |= XFS_BMAPI_IGSTATE;
 323
 324        if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
 325                if (nonblocking)
 326                        return -XFS_ERROR(EAGAIN);
 327                xfs_ilock(ip, XFS_ILOCK_SHARED);
 328        }
 329
 330        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 331               (ip->i_df.if_flags & XFS_IFEXTENTS));
 332        ASSERT(offset <= mp->m_super->s_maxbytes);
 333
 334        if (offset + count > mp->m_super->s_maxbytes)
 335                count = mp->m_super->s_maxbytes - offset;
 336        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
 337        offset_fsb = XFS_B_TO_FSBT(mp, offset);
 338        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
 339                                imap, &nimaps, bmapi_flags);
 340        xfs_iunlock(ip, XFS_ILOCK_SHARED);
 341
 342        if (error)
 343                return -XFS_ERROR(error);
 344
 345        if (type == XFS_IO_DELALLOC &&
 346            (!nimaps || isnullstartblock(imap->br_startblock))) {
 347                error = xfs_iomap_write_allocate(ip, offset, count, imap);
 348                if (!error)
 349                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
 350                return -XFS_ERROR(error);
 351        }
 352
 353#ifdef DEBUG
 354        if (type == XFS_IO_UNWRITTEN) {
 355                ASSERT(nimaps);
 356                ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 357                ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 358        }
 359#endif
 360        if (nimaps)
 361                trace_xfs_map_blocks_found(ip, offset, count, type, imap);
 362        return 0;
 363}
 364
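    /*
     * Return true if @offset falls within the extent mapping cached in @imap.
     */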
 365STATIC int
 366xfs_imap_valid(
 367        struct inode            *inode,
 368        struct xfs_bmbt_irec    *imap,
 369        xfs_off_t               offset)
 370{
 371        offset >>= inode->i_blkbits;
 372
 373        return offset >= imap->br_startoff &&
 374                offset < imap->br_startoff + imap->br_blockcount;
 375}
 376
 377/*
 378 * BIO completion handler for buffered IO.
 379 */
 380STATIC void
 381xfs_end_bio(
 382        struct bio              *bio,
 383        int                     error)
 384{
 385        xfs_ioend_t             *ioend = bio->bi_private;
 386
 387        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
 388        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 389
 390        /* Toss bio and pass work off to a completion workqueue */
 391        bio->bi_private = NULL;
 392        bio->bi_end_io = NULL;
 393        bio_put(bio);
 394
 395        xfs_finish_ioend(ioend);
 396}
 397
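    /*
     * Account for a bio in flight against the ioend and submit it, directing
     * its completion to xfs_end_bio().
     */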
 398STATIC void
 399xfs_submit_ioend_bio(
 400        struct writeback_control *wbc,
 401        xfs_ioend_t             *ioend,
 402        struct bio              *bio)
 403{
 404        atomic_inc(&ioend->io_remaining);
 405        bio->bi_private = ioend;
 406        bio->bi_end_io = xfs_end_bio;
 407        submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
 408}
 409
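    /*
     * Allocate a bio sized for the underlying device and point it at the
     * block addressed by the buffer_head.
     */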
 410STATIC struct bio *
 411xfs_alloc_ioend_bio(
 412        struct buffer_head      *bh)
 413{
 414        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);
 415        struct bio              *bio = bio_alloc(GFP_NOIO, nvecs);
 416
 417        ASSERT(bio->bi_private == NULL);
 418        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 419        bio->bi_bdev = bh->b_bdev;
 420        return bio;
 421}
 422
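    /*
     * Mark a buffer ready for I/O: flag it async write, make it uptodate
     * and clear the dirty bit.
     */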
 423STATIC void
 424xfs_start_buffer_writeback(
 425        struct buffer_head      *bh)
 426{
 427        ASSERT(buffer_mapped(bh));
 428        ASSERT(buffer_locked(bh));
 429        ASSERT(!buffer_delay(bh));
 430        ASSERT(!buffer_unwritten(bh));
 431
 432        mark_buffer_async_write(bh);
 433        set_buffer_uptodate(bh);
 434        clear_buffer_dirty(bh);
 435}
 436
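    /*
     * Move a page into writeback state and unlock it.  If none of its buffers
     * will actually be written, complete the writeback immediately.
     */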
 437STATIC void
 438xfs_start_page_writeback(
 439        struct page             *page,
 440        int                     clear_dirty,
 441        int                     buffers)
 442{
 443        ASSERT(PageLocked(page));
 444        ASSERT(!PageWriteback(page));
 445        if (clear_dirty)
 446                clear_page_dirty_for_io(page);
 447        set_page_writeback(page);
 448        unlock_page(page);
 449        /* If no buffers on the page are to be written, finish it here */
 450        if (!buffers)
 451                end_page_writeback(page);
 452}
 453
 454static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 455{
 456        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 457}
 458
 459/*
 460 * Submit all of the bios for all of the ioends we have saved up, covering the
 461 * initial writepage page and also any probed pages.
 462 *
 463 * Because we may have multiple ioends spanning a page, we need to start
 464 * writeback on all the buffers before we submit them for I/O. If we mark the
 465 * buffers as we go, then we can end up with a page that only has buffers
 466 * marked async write, and I/O completion can occur before we mark the other
 467 * buffers async write.
 468 *
 469 * The end result of this is that we trip a bug in end_page_writeback() because
 470 * we call it twice for the one page as the code in end_buffer_async_write()
 471 * assumes that all buffers on the page are started at the same time.
 472 *
 473 * The fix is two passes across the ioend list - one to start writeback on the
 474 * buffer_heads, and then submit them for I/O on the second pass.
 475 *
 476 * If @fail is non-zero, it means that we have a situation where some part of
 477 * the submission process has failed after we have marked pages for writeback
 478 * and unlocked them. In this situation, we need to fail the ioend chain rather
 479 * than submit it to IO. This typically only happens on a filesystem shutdown.
 480 */
 481STATIC void
 482xfs_submit_ioend(
 483        struct writeback_control *wbc,
 484        xfs_ioend_t             *ioend,
 485        int                     fail)
 486{
 487        xfs_ioend_t             *head = ioend;
 488        xfs_ioend_t             *next;
 489        struct buffer_head      *bh;
 490        struct bio              *bio;
 491        sector_t                lastblock = 0;
 492
 493        /* Pass 1 - start writeback */
 494        do {
 495                next = ioend->io_list;
 496                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
 497                        xfs_start_buffer_writeback(bh);
 498        } while ((ioend = next) != NULL);
 499
 500        /* Pass 2 - submit I/O */
 501        ioend = head;
 502        do {
 503                next = ioend->io_list;
 504                bio = NULL;
 505
 506                /*
 507                 * If we are failing the IO now, just mark the ioend with an
 508                 * error and finish it. This will run IO completion immediately
 509                 * as there is only one reference to the ioend at this point in
 510                 * time.
 511                 */
 512                if (fail) {
 513                        ioend->io_error = -fail;
 514                        xfs_finish_ioend(ioend);
 515                        continue;
 516                }
 517
 518                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 519
 520                        if (!bio) {
 521 retry:
 522                                bio = xfs_alloc_ioend_bio(bh);
 523                        } else if (bh->b_blocknr != lastblock + 1) {
 524                                xfs_submit_ioend_bio(wbc, ioend, bio);
 525                                goto retry;
 526                        }
 527
 528                        if (bio_add_buffer(bio, bh) != bh->b_size) {
 529                                xfs_submit_ioend_bio(wbc, ioend, bio);
 530                                goto retry;
 531                        }
 532
 533                        lastblock = bh->b_blocknr;
 534                }
 535                if (bio)
 536                        xfs_submit_ioend_bio(wbc, ioend, bio);
 537                xfs_finish_ioend(ioend);
 538        } while ((ioend = next) != NULL);
 539}
 540
 541/*
 542 * Cancel submission of all buffer_heads so far in this ioend.
 543 * Toss the ioend too.  Only ever called for the initial page
 544 * in a writepage request, so only ever one page.
 545 */
 546STATIC void
 547xfs_cancel_ioend(
 548        xfs_ioend_t             *ioend)
 549{
 550        xfs_ioend_t             *next;
 551        struct buffer_head      *bh, *next_bh;
 552
 553        do {
 554                next = ioend->io_list;
 555                bh = ioend->io_buffer_head;
 556                do {
 557                        next_bh = bh->b_private;
 558                        clear_buffer_async_write(bh);
 559                        unlock_buffer(bh);
 560                } while ((bh = next_bh) != NULL);
 561
 562                mempool_free(ioend, xfs_ioend_pool);
 563        } while ((ioend = next) != NULL);
 564}
 565
 566/*
 567 * Test to see if we've been building up a completion structure for
 568 * earlier buffers -- if so, we try to append to this ioend if we
 569 * can, otherwise we finish off any current ioend and start another.  The
 570 * ioend being built is passed back to the caller via *result.
 571 */
 572STATIC void
 573xfs_add_to_ioend(
 574        struct inode            *inode,
 575        struct buffer_head      *bh,
 576        xfs_off_t               offset,
 577        unsigned int            type,
 578        xfs_ioend_t             **result,
 579        int                     need_ioend)
 580{
 581        xfs_ioend_t             *ioend = *result;
 582
 583        if (!ioend || need_ioend || type != ioend->io_type) {
 584                xfs_ioend_t     *previous = *result;
 585
 586                ioend = xfs_alloc_ioend(inode, type);
 587                ioend->io_offset = offset;
 588                ioend->io_buffer_head = bh;
 589                ioend->io_buffer_tail = bh;
 590                if (previous)
 591                        previous->io_list = ioend;
 592                *result = ioend;
 593        } else {
 594                ioend->io_buffer_tail->b_private = bh;
 595                ioend->io_buffer_tail = bh;
 596        }
 597
 598        bh->b_private = NULL;
 599        ioend->io_size += bh->b_size;
 600}
 601
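    /*
     * Translate the file offset into a disk block number using the extent
     * mapping and attach it to the buffer, marking the buffer mapped.
     */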
 602STATIC void
 603xfs_map_buffer(
 604        struct inode            *inode,
 605        struct buffer_head      *bh,
 606        struct xfs_bmbt_irec    *imap,
 607        xfs_off_t               offset)
 608{
 609        sector_t                bn;
 610        struct xfs_mount        *m = XFS_I(inode)->i_mount;
 611        xfs_off_t               iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
 612        xfs_daddr_t             iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
 613
 614        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 615        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 616
 617        bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
 618              ((offset - iomap_offset) >> inode->i_blkbits);
 619
 620        ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
 621
 622        bh->b_blocknr = bn;
 623        set_buffer_mapped(bh);
 624}
 625
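    /*
     * Map the buffer at this offset using the extent and clear its
     * delalloc/unwritten flags so it is treated as a plain mapped buffer
     * for the I/O.
     */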
 626STATIC void
 627xfs_map_at_offset(
 628        struct inode            *inode,
 629        struct buffer_head      *bh,
 630        struct xfs_bmbt_irec    *imap,
 631        xfs_off_t               offset)
 632{
 633        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 634        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 635
 636        xfs_map_buffer(inode, bh, imap, offset);
 637        set_buffer_mapped(bh);
 638        clear_buffer_delay(bh);
 639        clear_buffer_unwritten(bh);
 640}
 641
 642/*
 643 * Test if a given page is suitable for writing as part of an unwritten
 644 * or delayed allocate extent.
 645 */
 646STATIC int
 647xfs_check_page_type(
 648        struct page             *page,
 649        unsigned int            type)
 650{
 651        if (PageWriteback(page))
 652                return 0;
 653
 654        if (page->mapping && page_has_buffers(page)) {
 655                struct buffer_head      *bh, *head;
 656                int                     acceptable = 0;
 657
 658                bh = head = page_buffers(page);
 659                do {
 660                        if (buffer_unwritten(bh))
 661                                acceptable += (type == XFS_IO_UNWRITTEN);
 662                        else if (buffer_delay(bh))
 663                                acceptable += (type == XFS_IO_DELALLOC);
 664                        else if (buffer_dirty(bh) && buffer_mapped(bh))
 665                                acceptable += (type == XFS_IO_OVERWRITE);
 666                        else
 667                                break;
 668                } while ((bh = bh->b_this_page) != head);
 669
 670                if (acceptable)
 671                        return 1;
 672        }
 673
 674        return 0;
 675}
 676
 677/*
 678 * Allocate & map buffers for page given the extent map. Write it out.
 679 * Except for the original page of a writepage, this is called on
 680 * delalloc/unwritten pages only; for the original page it is possible
 681 * that the page has no mapping at all.
 682 */
 683STATIC int
 684xfs_convert_page(
 685        struct inode            *inode,
 686        struct page             *page,
 687        loff_t                  tindex,
 688        struct xfs_bmbt_irec    *imap,
 689        xfs_ioend_t             **ioendp,
 690        struct writeback_control *wbc)
 691{
 692        struct buffer_head      *bh, *head;
 693        xfs_off_t               end_offset;
 694        unsigned long           p_offset;
 695        unsigned int            type;
 696        int                     len, page_dirty;
 697        int                     count = 0, done = 0, uptodate = 1;
 698        xfs_off_t               offset = page_offset(page);
 699
 700        if (page->index != tindex)
 701                goto fail;
 702        if (!trylock_page(page))
 703                goto fail;
 704        if (PageWriteback(page))
 705                goto fail_unlock_page;
 706        if (page->mapping != inode->i_mapping)
 707                goto fail_unlock_page;
 708        if (!xfs_check_page_type(page, (*ioendp)->io_type))
 709                goto fail_unlock_page;
 710
 711        /*
 712         * page_dirty is initially a count of buffers on the page before
 713         * EOF and is decremented as we move each into a cleanable state.
 714         *
 715         * Derivation:
 716         *
 717         * End offset is the highest offset that this page should represent.
 718         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
 719         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
 720         * hence give us the correct page_dirty count. On any other page,
 721         * it will be zero and in that case we need page_dirty to be the
 722         * count of buffers on the page.
 723         */
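            /*
             * As an illustration (numbers are examples only): with 4k pages
             * and 512 byte blocks, an i_size that ends 2500 bytes into its
             * last page gives p_offset = roundup(2500, 512) = 2560, and hence
             * a page_dirty count of 2560 / 512 = 5 buffers before EOF.
             */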
 724        end_offset = min_t(unsigned long long,
 725                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
 726                        i_size_read(inode));
 727
 728        /*
 729         * If the current map does not span the entire page we are about to try
 730         * to write, then give up. The only way we can write a page that spans
 731         * multiple mappings in a single writeback iteration is via the
 732         * xfs_vm_writepage() function. Data integrity writeback requires the
 733         * entire page to be written in a single attempt, otherwise the part of
 734         * the page we don't write here doesn't get written as part of the data
 735         * integrity sync.
 736         *
 737         * For normal writeback, we also don't attempt to write partial pages
 738         * here as it simply means that write_cache_pages() will see it under
 739         * writeback and ignore the page until some point in the future, at
 740         * which time this will be the only page in the file that needs
 741         * writeback.  Hence for more optimal IO patterns, we should always
 742         * avoid partial page writeback due to multiple mappings on a page here.
 743         */
 744        if (!xfs_imap_valid(inode, imap, end_offset))
 745                goto fail_unlock_page;
 746
 747        len = 1 << inode->i_blkbits;
 748        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
 749                                        PAGE_CACHE_SIZE);
 750        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 751        page_dirty = p_offset / len;
 752
 753        bh = head = page_buffers(page);
 754        do {
 755                if (offset >= end_offset)
 756                        break;
 757                if (!buffer_uptodate(bh))
 758                        uptodate = 0;
 759                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
 760                        done = 1;
 761                        continue;
 762                }
 763
 764                if (buffer_unwritten(bh) || buffer_delay(bh) ||
 765                    buffer_mapped(bh)) {
 766                        if (buffer_unwritten(bh))
 767                                type = XFS_IO_UNWRITTEN;
 768                        else if (buffer_delay(bh))
 769                                type = XFS_IO_DELALLOC;
 770                        else
 771                                type = XFS_IO_OVERWRITE;
 772
 773                        if (!xfs_imap_valid(inode, imap, offset)) {
 774                                done = 1;
 775                                continue;
 776                        }
 777
 778                        lock_buffer(bh);
 779                        if (type != XFS_IO_OVERWRITE)
 780                                xfs_map_at_offset(inode, bh, imap, offset);
 781                        xfs_add_to_ioend(inode, bh, offset, type,
 782                                         ioendp, done);
 783
 784                        page_dirty--;
 785                        count++;
 786                } else {
 787                        done = 1;
 788                }
 789        } while (offset += len, (bh = bh->b_this_page) != head);
 790
 791        if (uptodate && bh == head)
 792                SetPageUptodate(page);
 793
 794        if (count) {
 795                if (--wbc->nr_to_write <= 0 &&
 796                    wbc->sync_mode == WB_SYNC_NONE)
 797                        done = 1;
 798        }
 799        xfs_start_page_writeback(page, !page_dirty, count);
 800
 801        return done;
 802 fail_unlock_page:
 803        unlock_page(page);
 804 fail:
 805        return 1;
 806}
 807
 808/*
 809 * Convert & write out a cluster of pages in the same extent as defined
 810 * by imap and following the start page.
 811 */
 812STATIC void
 813xfs_cluster_write(
 814        struct inode            *inode,
 815        pgoff_t                 tindex,
 816        struct xfs_bmbt_irec    *imap,
 817        xfs_ioend_t             **ioendp,
 818        struct writeback_control *wbc,
 819        pgoff_t                 tlast)
 820{
 821        struct pagevec          pvec;
 822        int                     done = 0, i;
 823
 824        pagevec_init(&pvec, 0);
 825        while (!done && tindex <= tlast) {
 826                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
 827
 828                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
 829                        break;
 830
 831                for (i = 0; i < pagevec_count(&pvec); i++) {
 832                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
 833                                        imap, ioendp, wbc);
 834                        if (done)
 835                                break;
 836                }
 837
 838                pagevec_release(&pvec);
 839                cond_resched();
 840        }
 841}
 842
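    /*
     * ->invalidatepage method: trace the invalidation and let
     * block_invalidatepage() drop the buffers over the given range.
     */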
 843STATIC void
 844xfs_vm_invalidatepage(
 845        struct page             *page,
 846        unsigned int            offset,
 847        unsigned int            length)
 848{
 849        trace_xfs_invalidatepage(page->mapping->host, page, offset,
 850                                 length);
 851        block_invalidatepage(page, offset, length);
 852}
 853
 854/*
 855 * If the page has delalloc buffers on it, we need to punch them out before we
 856 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 857 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 858 * is done on that same region - the delalloc extent is returned when none is
 859 * supposed to be there.
 860 *
 861 * We prevent this by truncating away the delalloc regions on the page before
 862 * invalidating it. Because they are delalloc, we can do this without needing a
 863 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 864 * truncation without a transaction as there is no space left for block
 865 * reservation (typically why we see an ENOSPC in writeback).
 866 *
 867 * This is not a performance critical path, so for now just do the punching a
 868 * buffer head at a time.
 869 */
 870STATIC void
 871xfs_aops_discard_page(
 872        struct page             *page)
 873{
 874        struct inode            *inode = page->mapping->host;
 875        struct xfs_inode        *ip = XFS_I(inode);
 876        struct buffer_head      *bh, *head;
 877        loff_t                  offset = page_offset(page);
 878
 879        if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
 880                goto out_invalidate;
 881
 882        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 883                goto out_invalidate;
 884
 885        xfs_alert(ip->i_mount,
 886                "page discard on page %p, inode 0x%llx, offset %llu.",
 887                        page, ip->i_ino, offset);
 888
 889        xfs_ilock(ip, XFS_ILOCK_EXCL);
 890        bh = head = page_buffers(page);
 891        do {
 892                int             error;
 893                xfs_fileoff_t   start_fsb;
 894
 895                if (!buffer_delay(bh))
 896                        goto next_buffer;
 897
 898                start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
 899                error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
 900                if (error) {
 901                        /* something screwed, just bail */
 902                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 903                                xfs_alert(ip->i_mount,
 904                        "page discard unable to remove delalloc mapping.");
 905                        }
 906                        break;
 907                }
 908next_buffer:
 909                offset += 1 << inode->i_blkbits;
 910
 911        } while ((bh = bh->b_this_page) != head);
 912
 913        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 914out_invalidate:
 915        xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 916        return;
 917}
 918
 919/*
 920 * Write out a dirty page.
 921 *
 922 * For delalloc space on the page we need to allocate space and flush it.
 923 * For unwritten space on the page we need to start the conversion to
 924 * regular allocated space.
 925 * For any other dirty buffer heads on the page we should flush them.
 926 */
 927STATIC int
 928xfs_vm_writepage(
 929        struct page             *page,
 930        struct writeback_control *wbc)
 931{
 932        struct inode            *inode = page->mapping->host;
 933        struct buffer_head      *bh, *head;
 934        struct xfs_bmbt_irec    imap;
 935        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
 936        loff_t                  offset;
 937        unsigned int            type;
 938        __uint64_t              end_offset;
 939        pgoff_t                 end_index, last_index;
 940        ssize_t                 len;
 941        int                     err, imap_valid = 0, uptodate = 1;
 942        int                     count = 0;
 943        int                     nonblocking = 0;
 944
 945        trace_xfs_writepage(inode, page, 0, 0);
 946
 947        ASSERT(page_has_buffers(page));
 948
 949        /*
 950         * Refuse to write the page out if we are called from reclaim context.
 951         *
 952         * This avoids stack overflows when called from deeply used stacks in
 953         * random callers for direct reclaim or memcg reclaim.  We explicitly
 954         * allow reclaim from kswapd as the stack usage there is relatively low.
 955         *
 956         * This should never happen except in the case of a VM regression so
 957         * warn about it.
 958         */
 959        if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
 960                        PF_MEMALLOC))
 961                goto redirty;
 962
 963        /*
 964         * Given that we do not allow direct reclaim to call us, we should
 965         * never be called while in a filesystem transaction.
 966         */
 967        if (WARN_ON(current->flags & PF_FSTRANS))
 968                goto redirty;
 969
 970        /* Is this page beyond the end of the file? */
 971        offset = i_size_read(inode);
 972        end_index = offset >> PAGE_CACHE_SHIFT;
 973        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
 974        if (page->index >= end_index) {
 975                unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
 976
 977                /*
 978                 * Skip the page if it is fully outside i_size, e.g. due to a
 979                 * truncate operation that is in progress. We must redirty the
 980                 * page so that reclaim stops reclaiming it. Otherwise
 981                 * xfs_vm_releasepage() is called on it and gets confused.
 982                 */
 983                if (page->index >= end_index + 1 || offset_into_page == 0)
 984                        goto redirty;
 985
 986                /*
 987                 * The page straddles i_size.  It must be zeroed out on each
 988                 * and every writepage invocation because it may be mmapped.
 989                 * "A file is mapped in multiples of the page size.  For a file
 990                 * that is not a multiple of the  page size, the remaining
 991                 * memory is zeroed when mapped, and writes to that region are
 992                 * not written out to the file."
 993                 */
 994                zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
 995        }
 996
 997        end_offset = min_t(unsigned long long,
 998                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
 999                        offset);
1000        len = 1 << inode->i_blkbits;
1001
1002        bh = head = page_buffers(page);
1003        offset = page_offset(page);
1004        type = XFS_IO_OVERWRITE;
1005
1006        if (wbc->sync_mode == WB_SYNC_NONE)
1007                nonblocking = 1;
1008
1009        do {
1010                int new_ioend = 0;
1011
1012                if (offset >= end_offset)
1013                        break;
1014                if (!buffer_uptodate(bh))
1015                        uptodate = 0;
1016
1017                /*
1018                 * set_page_dirty dirties all buffers in a page, independent
1019                 * of their state.  The dirty state however is entirely
1020                 * meaningless for holes (!mapped && uptodate), so skip
1021                 * buffers covering holes here.
1022                 */
1023                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1024                        imap_valid = 0;
1025                        continue;
1026                }
1027
1028                if (buffer_unwritten(bh)) {
1029                        if (type != XFS_IO_UNWRITTEN) {
1030                                type = XFS_IO_UNWRITTEN;
1031                                imap_valid = 0;
1032                        }
1033                } else if (buffer_delay(bh)) {
1034                        if (type != XFS_IO_DELALLOC) {
1035                                type = XFS_IO_DELALLOC;
1036                                imap_valid = 0;
1037                        }
1038                } else if (buffer_uptodate(bh)) {
1039                        if (type != XFS_IO_OVERWRITE) {
1040                                type = XFS_IO_OVERWRITE;
1041                                imap_valid = 0;
1042                        }
1043                } else {
1044                        if (PageUptodate(page))
1045                                ASSERT(buffer_mapped(bh));
1046                        /*
1047                         * This buffer is not uptodate and will not be
1048                         * written to disk.  Ensure that we will put any
1049                         * subsequent writeable buffers into a new
1050                         * ioend.
1051                         */
1052                        imap_valid = 0;
1053                        continue;
1054                }
1055
1056                if (imap_valid)
1057                        imap_valid = xfs_imap_valid(inode, &imap, offset);
1058                if (!imap_valid) {
1059                        /*
1060                         * If we didn't have a valid mapping then we need to
1061                         * put the new mapping into a separate ioend structure.
1062                         * This ensures non-contiguous extents always have
1063                         * separate ioends, which is particularly important
1064                         * for unwritten extent conversion at I/O completion
1065                         * time.
1066                         */
1067                        new_ioend = 1;
1068                        err = xfs_map_blocks(inode, offset, &imap, type,
1069                                             nonblocking);
1070                        if (err)
1071                                goto error;
1072                        imap_valid = xfs_imap_valid(inode, &imap, offset);
1073                }
1074                if (imap_valid) {
1075                        lock_buffer(bh);
1076                        if (type != XFS_IO_OVERWRITE)
1077                                xfs_map_at_offset(inode, bh, &imap, offset);
1078                        xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1079                                         new_ioend);
1080                        count++;
1081                }
1082
1083                if (!iohead)
1084                        iohead = ioend;
1085
1086        } while (offset += len, ((bh = bh->b_this_page) != head));
1087
1088        if (uptodate && bh == head)
1089                SetPageUptodate(page);
1090
1091        xfs_start_page_writeback(page, 1, count);
1092
1093        /* if there is no IO to be submitted for this page, we are done */
1094        if (!ioend)
1095                return 0;
1096
1097        ASSERT(iohead);
1098
1099        /*
1100         * Any errors from this point onwards need to be reported through the IO
1101         * completion path as we have marked the initial page as under writeback
1102         * and unlocked it.
1103         */
1104        if (imap_valid) {
1105                xfs_off_t               end_index;
1106
1107                end_index = imap.br_startoff + imap.br_blockcount;
1108
1109                /* to bytes */
1110                end_index <<= inode->i_blkbits;
1111
1112                /* to pages */
1113                end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1114
1115                /* check against file size */
1116                if (end_index > last_index)
1117                        end_index = last_index;
1118
1119                xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1120                                  wbc, end_index);
1121        }
1122
1123
1124        /*
1125         * Reserve log space if we might write beyond the on-disk inode size.
1126         */
1127        err = 0;
1128        if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
1129                err = xfs_setfilesize_trans_alloc(ioend);
1130
1131        xfs_submit_ioend(wbc, iohead, err);
1132
1133        return 0;
1134
1135error:
1136        if (iohead)
1137                xfs_cancel_ioend(iohead);
1138
1139        if (err == -EAGAIN)
1140                goto redirty;
1141
1142        xfs_aops_discard_page(page);
1143        ClearPageUptodate(page);
1144        unlock_page(page);
1145        return err;
1146
1147redirty:
1148        redirty_page_for_writepage(wbc, page);
1149        unlock_page(page);
1150        return 0;
1151}
1152
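    /*
     * ->writepages method: clear the truncated flag on the inode and let the
     * generic writeback code walk the dirty pages, calling xfs_vm_writepage()
     * for each one.
     */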
1153STATIC int
1154xfs_vm_writepages(
1155        struct address_space    *mapping,
1156        struct writeback_control *wbc)
1157{
1158        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1159        return generic_writepages(mapping, wbc);
1160}
1161
1162/*
1163 * Called to move a page into cleanable state - and from there
1164 * to be released. The page should already be clean. We always
1165 * have buffer heads in this call.
1166 *
1167 * Returns 1 if the page is ok to release, 0 otherwise.
1168 */
1169STATIC int
1170xfs_vm_releasepage(
1171        struct page             *page,
1172        gfp_t                   gfp_mask)
1173{
1174        int                     delalloc, unwritten;
1175
1176        trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1177
1178        xfs_count_page_state(page, &delalloc, &unwritten);
1179
1180        if (WARN_ON(delalloc))
1181                return 0;
1182        if (WARN_ON(unwritten))
1183                return 0;
1184
1185        return try_to_free_buffers(page);
1186}
1187
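    /*
     * Common get_blocks callback for buffered and direct I/O.  Map the block
     * range starting at @iblock into @bh_result, allocating delalloc or real
     * blocks as needed when @create is set.
     */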
1188STATIC int
1189__xfs_get_blocks(
1190        struct inode            *inode,
1191        sector_t                iblock,
1192        struct buffer_head      *bh_result,
1193        int                     create,
1194        int                     direct)
1195{
1196        struct xfs_inode        *ip = XFS_I(inode);
1197        struct xfs_mount        *mp = ip->i_mount;
1198        xfs_fileoff_t           offset_fsb, end_fsb;
1199        int                     error = 0;
1200        int                     lockmode = 0;
1201        struct xfs_bmbt_irec    imap;
1202        int                     nimaps = 1;
1203        xfs_off_t               offset;
1204        ssize_t                 size;
1205        int                     new = 0;
1206
1207        if (XFS_FORCED_SHUTDOWN(mp))
1208                return -XFS_ERROR(EIO);
1209
1210        offset = (xfs_off_t)iblock << inode->i_blkbits;
1211        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1212        size = bh_result->b_size;
1213
1214        if (!create && direct && offset >= i_size_read(inode))
1215                return 0;
1216
1217        /*
1218         * Direct I/O is usually done on preallocated files, so try getting
1219         * a block mapping without an exclusive lock first.  For buffered
1220         * writes we already have the exclusive iolock anyway, so avoiding
1221         * a lock roundtrip here by taking the ilock exclusive from the
1222         * beginning is a useful micro optimization.
1223         */
1224        if (create && !direct) {
1225                lockmode = XFS_ILOCK_EXCL;
1226                xfs_ilock(ip, lockmode);
1227        } else {
1228                lockmode = xfs_ilock_map_shared(ip);
1229        }
1230
1231        ASSERT(offset <= mp->m_super->s_maxbytes);
1232        if (offset + size > mp->m_super->s_maxbytes)
1233                size = mp->m_super->s_maxbytes - offset;
1234        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1235        offset_fsb = XFS_B_TO_FSBT(mp, offset);
1236
1237        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1238                                &imap, &nimaps, XFS_BMAPI_ENTIRE);
1239        if (error)
1240                goto out_unlock;
1241
1242        if (create &&
1243            (!nimaps ||
1244             (imap.br_startblock == HOLESTARTBLOCK ||
1245              imap.br_startblock == DELAYSTARTBLOCK))) {
1246                if (direct || xfs_get_extsz_hint(ip)) {
1247                        /*
1248                         * Drop the ilock in preparation for starting the block
1249                         * allocation transaction.  It will be retaken
1250                         * exclusively inside xfs_iomap_write_direct for the
1251                         * actual allocation.
1252                         */
1253                        xfs_iunlock(ip, lockmode);
1254                        error = xfs_iomap_write_direct(ip, offset, size,
1255                                                       &imap, nimaps);
1256                        if (error)
1257                                return -error;
1258                        new = 1;
1259                } else {
1260                        /*
1261                         * Delalloc reservations do not require a transaction,
1262                         * we can go on without dropping the lock here. If we
1263                         * are allocating a new delalloc block, make sure that
1264                         * we set the new flag so that we mark the buffer new so
1265                         * that we know that it is newly allocated if the write
1266                         * fails.
1267                         */
1268                        if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1269                                new = 1;
1270                        error = xfs_iomap_write_delay(ip, offset, size, &imap);
1271                        if (error)
1272                                goto out_unlock;
1273
1274                        xfs_iunlock(ip, lockmode);
1275                }
1276
1277                trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1278        } else if (nimaps) {
1279                trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1280                xfs_iunlock(ip, lockmode);
1281        } else {
1282                trace_xfs_get_blocks_notfound(ip, offset, size);
1283                goto out_unlock;
1284        }
1285
1286        if (imap.br_startblock != HOLESTARTBLOCK &&
1287            imap.br_startblock != DELAYSTARTBLOCK) {
1288                /*
1289                 * For unwritten extents do not report a disk address on
1290                 * the read case (treat as if we're reading into a hole).
1291                 */
1292                if (create || !ISUNWRITTEN(&imap))
1293                        xfs_map_buffer(inode, bh_result, &imap, offset);
1294                if (create && ISUNWRITTEN(&imap)) {
1295                        if (direct)
1296                                bh_result->b_private = inode;
1297                        set_buffer_unwritten(bh_result);
1298                }
1299        }
1300
1301        /*
1302         * If this is a realtime file, data may be on a different device
1303         * to that pointed to by the buffer_head b_bdev currently.
1304         */
1305        bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1306
1307        /*
1308         * If we previously allocated a block out beyond eof and we are now
1309         * coming back to use it then we will need to flag it as new even if it
1310         * has a disk address.
1311         *
1312         * With sub-block writes into unwritten extents we also need to mark
1313         * the buffer as new so that the unwritten parts of the buffer get
1314         * correctly zeroed.
1315         */
1316        if (create &&
1317            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1318             (offset >= i_size_read(inode)) ||
1319             (new || ISUNWRITTEN(&imap))))
1320                set_buffer_new(bh_result);
1321
1322        if (imap.br_startblock == DELAYSTARTBLOCK) {
1323                BUG_ON(direct);
1324                if (create) {
1325                        set_buffer_uptodate(bh_result);
1326                        set_buffer_mapped(bh_result);
1327                        set_buffer_delay(bh_result);
1328                }
1329        }
1330
1331        /*
1332         * If this is O_DIRECT or the mpage code calling, tell them how large
1333         * the mapping is, so that we can avoid repeated get_blocks calls.
1334         */
1335        if (direct || size > (1 << inode->i_blkbits)) {
1336                xfs_off_t               mapping_size;
1337
1338                mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1339                mapping_size <<= inode->i_blkbits;
1340
1341                ASSERT(mapping_size > 0);
1342                if (mapping_size > size)
1343                        mapping_size = size;
1344                if (mapping_size > LONG_MAX)
1345                        mapping_size = LONG_MAX;
1346
1347                bh_result->b_size = mapping_size;
1348        }
1349
1350        return 0;
1351
1352out_unlock:
1353        xfs_iunlock(ip, lockmode);
1354        return -error;
1355}
1356
1357int
1358xfs_get_blocks(
1359        struct inode            *inode,
1360        sector_t                iblock,
1361        struct buffer_head      *bh_result,
1362        int                     create)
1363{
1364        return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1365}
1366
1367STATIC int
1368xfs_get_blocks_direct(
1369        struct inode            *inode,
1370        sector_t                iblock,
1371        struct buffer_head      *bh_result,
1372        int                     create)
1373{
1374        return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1375}
1376
1377/*
1378 * Complete a direct I/O write request.
1379 *
1380 * If the private argument is non-NULL __xfs_get_blocks signals us that we
1381 * need to issue a transaction to convert the range from unwritten to written
1382 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
1383 * to do this and we are done.  But in case this was a successful AIO
1384 * request this handler is called from interrupt context, from which we
1385 * can't start transactions.  In that case offload the I/O completion to
1386 * the workqueues we also use for buffered I/O completion.
1387 */
1388STATIC void
1389xfs_end_io_direct_write(
1390        struct kiocb            *iocb,
1391        loff_t                  offset,
1392        ssize_t                 size,
1393        void                    *private,
1394        int                     ret,
1395        bool                    is_async)
1396{
1397        struct xfs_ioend        *ioend = iocb->private;
1398
1399        /*
1400         * While the generic direct I/O code updates the inode size, it does
1401         * so only after the end_io handler is called, which means our
1402         * end_io handler thinks the on-disk size is outside the in-core
1403         * size.  To prevent this just update it a little bit earlier here.
1404         */
1405        if (offset + size > i_size_read(ioend->io_inode))
1406                i_size_write(ioend->io_inode, offset + size);
1407
1408        /*
1409         * blockdev_direct_IO can return an error even after the I/O
1410         * completion handler was called.  Thus we need to protect
1411         * against double-freeing.
1412         */
1413        iocb->private = NULL;
1414
1415        ioend->io_offset = offset;
1416        ioend->io_size = size;
1417        ioend->io_iocb = iocb;
1418        ioend->io_result = ret;
1419        if (private && size > 0)
1420                ioend->io_type = XFS_IO_UNWRITTEN;
1421
1422        if (is_async) {
1423                ioend->io_isasync = 1;
1424                xfs_finish_ioend(ioend);
1425        } else {
1426                xfs_finish_ioend_sync(ioend);
1427        }
1428}
1429
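    /*
     * ->direct_IO method.  For writes we attach an ioend to the iocb so that
     * I/O completion can convert unwritten extents and, if needed, update the
     * on-disk file size; reads go straight to the generic direct I/O code.
     */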
1430STATIC ssize_t
1431xfs_vm_direct_IO(
1432        int                     rw,
1433        struct kiocb            *iocb,
1434        const struct iovec      *iov,
1435        loff_t                  offset,
1436        unsigned long           nr_segs)
1437{
1438        struct inode            *inode = iocb->ki_filp->f_mapping->host;
1439        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
1440        struct xfs_ioend        *ioend = NULL;
1441        ssize_t                 ret;
1442
1443        if (rw & WRITE) {
1444                size_t size = iov_length(iov, nr_segs);
1445
1446                /*
1447                 * We cannot preallocate a size update transaction here as we
1448                 * don't know whether allocation is necessary or not. Hence we
1449                 * can only tell IO completion that one is necessary if we are
1450                 * not doing unwritten extent conversion.
1451                 */
1452                iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
1453                if (offset + size > XFS_I(inode)->i_d.di_size)
1454                        ioend->io_isdirect = 1;
1455
1456                ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1457                                            offset, nr_segs,
1458                                            xfs_get_blocks_direct,
1459                                            xfs_end_io_direct_write, NULL, 0);
1460                if (ret != -EIOCBQUEUED && iocb->private)
1461                        goto out_destroy_ioend;
1462        } else {
1463                ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1464                                            offset, nr_segs,
1465                                            xfs_get_blocks_direct,
1466                                            NULL, NULL, 0);
1467        }
1468
1469        return ret;
1470
1471out_destroy_ioend:
1472        xfs_destroy_ioend(ioend);
1473        return ret;
1474}
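
/*
 * Editorial sketch, not part of the original file: a minimal userspace
 * O_DIRECT write of the kind that reaches xfs_vm_direct_IO() with
 * rw & WRITE set.  The path name is made up, and the 4096-byte alignment
 * of buffer, offset and length is an assumption chosen to satisfy typical
 * logical sector and page size constraints.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int demo_direct_write(void)
{
	void	*buf;
	int	fd, ret = 0;

	fd = open("/mnt/xfs/demo", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0)
		return -1;

	/* O_DIRECT requires aligned buffer, offset and length */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return -1;
	}
	memset(buf, 0xab, 4096);

	/* a synchronous write: the end_io handler runs with is_async == false */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		ret = -1;

	free(buf);
	close(fd);
	return ret;
}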
1475
1476/*
1477 * Punch out the delalloc blocks we have already allocated.
1478 *
1479 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1480 * as the page is still locked at this point.
1481 */
1482STATIC void
1483xfs_vm_kill_delalloc_range(
1484        struct inode            *inode,
1485        loff_t                  start,
1486        loff_t                  end)
1487{
1488        struct xfs_inode        *ip = XFS_I(inode);
1489        xfs_fileoff_t           start_fsb;
1490        xfs_fileoff_t           end_fsb;
1491        int                     error;
1492
1493        start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1494        end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1495        if (end_fsb <= start_fsb)
1496                return;
1497
1498        xfs_ilock(ip, XFS_ILOCK_EXCL);
1499        error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1500                                                end_fsb - start_fsb);
1501        if (error) {
1502                /* something went badly wrong; just bail out */
1503                if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1504                        xfs_alert(ip->i_mount,
1505                "%s: unable to clean up ino %lld",
1506                                        __func__, ip->i_ino);
1507                }
1508        }
1509        xfs_iunlock(ip, XFS_ILOCK_EXCL);
1510}
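
/*
 * Editorial worked example, not part of the original file: why the early
 * return on an empty FSB range matters.  XFS_B_TO_FSB() rounds a byte
 * offset up to whole filesystem blocks, so a range that lies inside a
 * single block collapses to nothing and no valid data sharing that block
 * is punched.  The helper below mimics the round-up with an assumed
 * 4096-byte block size; the real conversion uses the mount's block log.
 */
#include <stdio.h>

#define DEMO_BLKSIZE	4096ULL

static unsigned long long demo_b_to_fsb(unsigned long long bytes)
{
	return (bytes + DEMO_BLKSIZE - 1) / DEMO_BLKSIZE;	/* round up */
}

int main(void)
{
	/* start 1000, end 3000: both round up to FSB 1, so nothing is punched */
	printf("case 1: start_fsb=%llu end_fsb=%llu\n",
	       demo_b_to_fsb(1000), demo_b_to_fsb(3000));

	/* start 1000, end 9000: FSBs 1..2 are punched, block 0 is untouched */
	printf("case 2: start_fsb=%llu end_fsb=%llu\n",
	       demo_b_to_fsb(1000), demo_b_to_fsb(9000));
	return 0;
}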
1511
1512STATIC void
1513xfs_vm_write_failed(
1514        struct inode            *inode,
1515        struct page             *page,
1516        loff_t                  pos,
1517        unsigned                len)
1518{
1519        loff_t                  block_offset = pos & PAGE_MASK;
1520        loff_t                  block_start;
1521        loff_t                  block_end;
1522        loff_t                  from = pos & (PAGE_CACHE_SIZE - 1);
1523        loff_t                  to = from + len;
1524        struct buffer_head      *bh, *head;
1525
1526        ASSERT(block_offset + from == pos);
1527
1528        head = page_buffers(page);
1529        block_start = 0;
1530        for (bh = head; bh != head || !block_start;
1531             bh = bh->b_this_page, block_start = block_end,
1532                                   block_offset += bh->b_size) {
1533                block_end = block_start + bh->b_size;
1534
1535                /* skip buffers before the write */
1536                if (block_end <= from)
1537                        continue;
1538
1539                /* if the buffer is after the write, we're done */
1540                if (block_start >= to)
1541                        break;
1542
1543                if (!buffer_delay(bh))
1544                        continue;
1545
1546                if (!buffer_new(bh) && block_offset < i_size_read(inode))
1547                        continue;
1548
1549                xfs_vm_kill_delalloc_range(inode, block_offset,
1550                                           block_offset + bh->b_size);
1551        }
1552
1553}
1554
1555/*
1556 * This used to call block_write_begin(), but it unlocks and releases the page
1557 * on error, and we need that page to be able to punch stale delalloc blocks out
1558 * on failure.  Hence we open-code it here and call xfs_vm_write_failed() at
1559 * the appropriate point.
1560 */
1561STATIC int
1562xfs_vm_write_begin(
1563        struct file             *file,
1564        struct address_space    *mapping,
1565        loff_t                  pos,
1566        unsigned                len,
1567        unsigned                flags,
1568        struct page             **pagep,
1569        void                    **fsdata)
1570{
1571        pgoff_t                 index = pos >> PAGE_CACHE_SHIFT;
1572        struct page             *page;
1573        int                     status;
1574
1575        ASSERT(len <= PAGE_CACHE_SIZE);
1576
1577        page = grab_cache_page_write_begin(mapping, index,
1578                                           flags | AOP_FLAG_NOFS);
1579        if (!page)
1580                return -ENOMEM;
1581
1582        status = __block_write_begin(page, pos, len, xfs_get_blocks);
1583        if (unlikely(status)) {
1584                struct inode    *inode = mapping->host;
1585
1586                xfs_vm_write_failed(inode, page, pos, len);
1587                unlock_page(page);
1588
1589                if (pos + len > i_size_read(inode))
1590                        truncate_pagecache(inode, pos + len, i_size_read(inode));
1591
1592                page_cache_release(page);
1593                page = NULL;
1594        }
1595
1596        *pagep = page;
1597        return status;
1598}
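
/*
 * Editorial contrast sketch, not part of the original file: a filesystem
 * with no stale delalloc state to clean up could simply wrap
 * block_write_begin(), which unlocks and releases the page itself on
 * error.  XFS cannot do that here because it still needs the locked page
 * to locate and punch the delalloc buffers of the failed write.  The
 * "demo_*" names are hypothetical.
 */
#include <linux/buffer_head.h>
#include <linux/fs.h>

/* stub for illustration only; a real callback would map iblock to disk */
static int demo_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	return -EIO;
}

static int
demo_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	return block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				 pagep, demo_get_block);
}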
1599
1600/*
1601 * On failure, we only need to kill delalloc blocks beyond EOF because they
1602 * will never be written. For blocks within EOF, generic_write_end() zeros them
1603 * so they are safe to leave alone and be written with all the other valid data.
1604 */
1605STATIC int
1606xfs_vm_write_end(
1607        struct file             *file,
1608        struct address_space    *mapping,
1609        loff_t                  pos,
1610        unsigned                len,
1611        unsigned                copied,
1612        struct page             *page,
1613        void                    *fsdata)
1614{
1615        int                     ret;
1616
1617        ASSERT(len <= PAGE_CACHE_SIZE);
1618
1619        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1620        if (unlikely(ret < len)) {
1621                struct inode    *inode = mapping->host;
1622                loff_t          isize = i_size_read(inode);
1623                loff_t          to = pos + len;
1624
1625                if (to > isize) {
1626                        truncate_pagecache(inode, to, isize);
1627                        xfs_vm_kill_delalloc_range(inode, isize, to);
1628                }
1629        }
1630        return ret;
1631}
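
/*
 * Editorial worked example, not part of the original file (4096-byte
 * blocks assumed): say isize = pos = 2048, len = 8192 and the copy from
 * userspace fails completely, so generic_write_end() returns 0 (< len).
 * Then to = 10240 > isize, the page cache beyond offset 2048 is truncated,
 * and xfs_vm_kill_delalloc_range(inode, 2048, 10240) punches the delalloc
 * reservations for FSBs 1 and 2 only; block 0 is left alone because it
 * still holds the 2048 valid bytes below EOF.
 */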
1632
1633STATIC sector_t
1634xfs_vm_bmap(
1635        struct address_space    *mapping,
1636        sector_t                block)
1637{
1638        struct inode            *inode = mapping->host;
1639        struct xfs_inode        *ip = XFS_I(inode);
1640
1641        trace_xfs_vm_bmap(ip);
1642        xfs_ilock(ip, XFS_IOLOCK_SHARED);
1643        filemap_write_and_wait(mapping);
1644        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1645        return generic_block_bmap(mapping, block, xfs_get_blocks);
1646}
1647
1648STATIC int
1649xfs_vm_readpage(
1650        struct file             *unused,
1651        struct page             *page)
1652{
1653        return mpage_readpage(page, xfs_get_blocks);
1654}
1655
1656STATIC int
1657xfs_vm_readpages(
1658        struct file             *unused,
1659        struct address_space    *mapping,
1660        struct list_head        *pages,
1661        unsigned                nr_pages)
1662{
1663        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1664}
1665
1666const struct address_space_operations xfs_address_space_operations = {
1667        .readpage               = xfs_vm_readpage,
1668        .readpages              = xfs_vm_readpages,
1669        .writepage              = xfs_vm_writepage,
1670        .writepages             = xfs_vm_writepages,
1671        .releasepage            = xfs_vm_releasepage,
1672        .invalidatepage         = xfs_vm_invalidatepage,
1673        .write_begin            = xfs_vm_write_begin,
1674        .write_end              = xfs_vm_write_end,
1675        .bmap                   = xfs_vm_bmap,
1676        .direct_IO              = xfs_vm_direct_IO,
1677        .migratepage            = buffer_migrate_page,
1678        .is_partially_uptodate  = block_is_partially_uptodate,
1679        .error_remove_page      = generic_error_remove_page,
1680};
1681