linux/fs/xfs/linux-2.6/xfs_aops.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_bit.h"
  20#include "xfs_log.h"
  21#include "xfs_inum.h"
  22#include "xfs_sb.h"
  23#include "xfs_ag.h"
  24#include "xfs_dir2.h"
  25#include "xfs_trans.h"
  26#include "xfs_dmapi.h"
  27#include "xfs_mount.h"
  28#include "xfs_bmap_btree.h"
  29#include "xfs_alloc_btree.h"
  30#include "xfs_ialloc_btree.h"
  31#include "xfs_dir2_sf.h"
  32#include "xfs_attr_sf.h"
  33#include "xfs_dinode.h"
  34#include "xfs_inode.h"
  35#include "xfs_alloc.h"
  36#include "xfs_btree.h"
  37#include "xfs_error.h"
  38#include "xfs_rw.h"
  39#include "xfs_iomap.h"
  40#include "xfs_vnodeops.h"
  41#include <linux/mpage.h>
  42#include <linux/pagevec.h>
  43#include <linux/writeback.h>
  44
  45
  46/*
  47 * Prime number of hash buckets since address is used as the key.
  48 */
  49#define NVSYNC          37
  50#define to_ioend_wq(v)  (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
  51static wait_queue_head_t xfs_ioend_wq[NVSYNC];
  52
  53void __init
  54xfs_ioend_init(void)
  55{
  56        int i;
  57
  58        for (i = 0; i < NVSYNC; i++)
  59                init_waitqueue_head(&xfs_ioend_wq[i]);
  60}
  61
  62void
  63xfs_ioend_wait(
  64        xfs_inode_t     *ip)
  65{
  66        wait_queue_head_t *wq = to_ioend_wq(ip);
  67
  68        wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
  69}
  70
  71STATIC void
  72xfs_ioend_wake(
  73        xfs_inode_t     *ip)
  74{
  75        if (atomic_dec_and_test(&ip->i_iocount))
  76                wake_up(to_ioend_wq(ip));
  77}
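/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * i_iocount hold/release protocol pairs with the hashed wait queues above.
 * A hold is taken (see xfs_alloc_ioend() below) before I/O is issued and is
 * dropped through xfs_ioend_wake() when the ioend is torn down, so
 * xfs_ioend_wait() blocks until every outstanding hold on the inode is gone.
 * The helper below is hypothetical and only demonstrates the pairing.
 */
#if 0
STATIC void
xfs_ioend_hold_example(
        xfs_inode_t     *ip)
{
        /* take a hold before issuing I/O against the inode */
        atomic_inc(&ip->i_iocount);

        /* ... build and submit the I/O here ... */

        /* drop the hold; the last release wakes anyone in xfs_ioend_wait() */
        xfs_ioend_wake(ip);
}
#endif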
  78
  79STATIC void
  80xfs_count_page_state(
  81        struct page             *page,
  82        int                     *delalloc,
  83        int                     *unmapped,
  84        int                     *unwritten)
  85{
  86        struct buffer_head      *bh, *head;
  87
  88        *delalloc = *unmapped = *unwritten = 0;
  89
  90        bh = head = page_buffers(page);
  91        do {
  92                if (buffer_uptodate(bh) && !buffer_mapped(bh))
  93                        (*unmapped) = 1;
  94                else if (buffer_unwritten(bh))
  95                        (*unwritten) = 1;
  96                else if (buffer_delay(bh))
  97                        (*delalloc) = 1;
  98        } while ((bh = bh->b_this_page) != head);
  99}
 100
 101#if defined(XFS_RW_TRACE)
 102void
 103xfs_page_trace(
 104        int             tag,
 105        struct inode    *inode,
 106        struct page     *page,
 107        unsigned long   pgoff)
 108{
 109        xfs_inode_t     *ip;
 110        loff_t          isize = i_size_read(inode);
 111        loff_t          offset = page_offset(page);
 112        int             delalloc = -1, unmapped = -1, unwritten = -1;
 113
 114        if (page_has_buffers(page))
 115                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 116
 117        ip = XFS_I(inode);
 118        if (!ip->i_rwtrace)
 119                return;
 120
 121        ktrace_enter(ip->i_rwtrace,
 122                (void *)((unsigned long)tag),
 123                (void *)ip,
 124                (void *)inode,
 125                (void *)page,
 126                (void *)pgoff,
 127                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
 128                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
 129                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
 130                (void *)((unsigned long)(isize & 0xffffffff)),
 131                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
 132                (void *)((unsigned long)(offset & 0xffffffff)),
 133                (void *)((unsigned long)delalloc),
 134                (void *)((unsigned long)unmapped),
 135                (void *)((unsigned long)unwritten),
 136                (void *)((unsigned long)current_pid()),
 137                (void *)NULL);
 138}
 139#else
 140#define xfs_page_trace(tag, inode, page, pgoff)
 141#endif
 142
 143STATIC struct block_device *
 144xfs_find_bdev_for_inode(
 145        struct xfs_inode        *ip)
 146{
 147        struct xfs_mount        *mp = ip->i_mount;
 148
 149        if (XFS_IS_REALTIME_INODE(ip))
 150                return mp->m_rtdev_targp->bt_bdev;
 151        else
 152                return mp->m_ddev_targp->bt_bdev;
 153}
 154
 155/*
 156 * We're now finished for good with this ioend structure.
 157 * Update the page state via the associated buffer_heads,
 158 * release holds on the inode and bio, and finally free
 159 * up memory.  Do not use the ioend after this.
 160 */
 161STATIC void
 162xfs_destroy_ioend(
 163        xfs_ioend_t             *ioend)
 164{
 165        struct buffer_head      *bh, *next;
 166        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 167
 168        for (bh = ioend->io_buffer_head; bh; bh = next) {
 169                next = bh->b_private;
 170                bh->b_end_io(bh, !ioend->io_error);
 171        }
 172
 173        /*
 174         * Volume managers supporting multiple paths can send back ENODEV
 175         * when the final path disappears.  In this case continuing to fill
 176         * the page cache with dirty data which cannot be written out is
 177         * evil, so prevent that.
 178         */
 179        if (unlikely(ioend->io_error == -ENODEV)) {
 180                xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
 181                                      __FILE__, __LINE__);
 182        }
 183
 184        xfs_ioend_wake(ip);
 185        mempool_free(ioend, xfs_ioend_pool);
 186}
 187
 188/*
 189 * If the end of the current ioend is beyond the current EOF,
 190 * return the new EOF value, otherwise zero.
 191 */
 192STATIC xfs_fsize_t
 193xfs_ioend_new_eof(
 194        xfs_ioend_t             *ioend)
 195{
 196        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
 197        xfs_fsize_t             isize;
 198        xfs_fsize_t             bsize;
 199
 200        bsize = ioend->io_offset + ioend->io_size;
 201        isize = MAX(ip->i_size, ip->i_new_size);
 202        isize = MIN(isize, bsize);
 203        return isize > ip->i_d.di_size ? isize : 0;
 204}
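/*
 * Editor's worked example (illustrative numbers): with io_offset = 4096 and
 * io_size = 8192 the ioend ends at byte 12288.  If ip->i_size = 10000,
 * ip->i_new_size = 16384 and the on-disk ip->i_d.di_size = 8192, then
 * MAX(10000, 16384) = 16384, MIN(16384, 12288) = 12288, and since
 * 12288 > 8192 the new on-disk EOF returned is 12288.
 */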
 205
 206/*
 207 * Update on-disk file size now that data has been written to disk.
 208 * The current in-memory file size is i_size.  If a write is beyond
  209 * EOF, i_new_size will be the intended file size until i_size is
 210 * updated.  If this write does not extend all the way to the valid
 211 * file size then restrict this update to the end of the write.
 212 */
 213
 214STATIC void
 215xfs_setfilesize(
 216        xfs_ioend_t             *ioend)
 217{
 218        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
 219        xfs_fsize_t             isize;
 220
 221        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
 222        ASSERT(ioend->io_type != IOMAP_READ);
 223
 224        if (unlikely(ioend->io_error))
 225                return;
 226
 227        xfs_ilock(ip, XFS_ILOCK_EXCL);
 228        isize = xfs_ioend_new_eof(ioend);
 229        if (isize) {
 230                ip->i_d.di_size = isize;
 231                xfs_mark_inode_dirty_sync(ip);
 232        }
 233
 234        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 235}
 236
 237/*
 238 * Buffered IO write completion for delayed allocate extents.
 239 */
 240STATIC void
 241xfs_end_bio_delalloc(
 242        struct work_struct      *work)
 243{
 244        xfs_ioend_t             *ioend =
 245                container_of(work, xfs_ioend_t, io_work);
 246
 247        xfs_setfilesize(ioend);
 248        xfs_destroy_ioend(ioend);
 249}
 250
 251/*
 252 * Buffered IO write completion for regular, written extents.
 253 */
 254STATIC void
 255xfs_end_bio_written(
 256        struct work_struct      *work)
 257{
 258        xfs_ioend_t             *ioend =
 259                container_of(work, xfs_ioend_t, io_work);
 260
 261        xfs_setfilesize(ioend);
 262        xfs_destroy_ioend(ioend);
 263}
 264
 265/*
 266 * IO write completion for unwritten extents.
 267 *
 268 * Issue transactions to convert a buffer range from unwritten
 269 * to written extents.
 270 */
 271STATIC void
 272xfs_end_bio_unwritten(
 273        struct work_struct      *work)
 274{
 275        xfs_ioend_t             *ioend =
 276                container_of(work, xfs_ioend_t, io_work);
 277        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
 278        xfs_off_t               offset = ioend->io_offset;
 279        size_t                  size = ioend->io_size;
 280
 281        if (likely(!ioend->io_error)) {
 282                if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 283                        int error;
 284                        error = xfs_iomap_write_unwritten(ip, offset, size);
 285                        if (error)
 286                                ioend->io_error = error;
 287                }
 288                xfs_setfilesize(ioend);
 289        }
 290        xfs_destroy_ioend(ioend);
 291}
 292
 293/*
 294 * IO read completion for regular, written extents.
 295 */
 296STATIC void
 297xfs_end_bio_read(
 298        struct work_struct      *work)
 299{
 300        xfs_ioend_t             *ioend =
 301                container_of(work, xfs_ioend_t, io_work);
 302
 303        xfs_destroy_ioend(ioend);
 304}
 305
 306/*
 307 * Schedule IO completion handling on a xfsdatad if this was
 308 * the final hold on this ioend. If we are asked to wait,
 309 * flush the workqueue.
 310 */
 311STATIC void
 312xfs_finish_ioend(
 313        xfs_ioend_t     *ioend,
 314        int             wait)
 315{
 316        if (atomic_dec_and_test(&ioend->io_remaining)) {
 317                struct workqueue_struct *wq = xfsdatad_workqueue;
 318                if (ioend->io_work.func == xfs_end_bio_unwritten)
 319                        wq = xfsconvertd_workqueue;
 320
 321                queue_work(wq, &ioend->io_work);
 322                if (wait)
 323                        flush_workqueue(wq);
 324        }
 325}
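/*
 * Editor's note on the reference counting (summarising the code above and
 * below): xfs_alloc_ioend() sets io_remaining to 1, xfs_submit_ioend_bio()
 * takes an extra reference per bio, and each bio completion as well as the
 * final xfs_finish_ioend() call in xfs_submit_ioend() drops one.  The work
 * item is therefore only queued once the last bio has completed and the
 * submission path has dropped its initial reference.
 */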
 326
 327/*
 328 * Allocate and initialise an IO completion structure.
 329 * We need to track unwritten extent write completion here initially.
 330 * We'll need to extend this for updating the ondisk inode size later
 331 * (vs. incore size).
 332 */
 333STATIC xfs_ioend_t *
 334xfs_alloc_ioend(
 335        struct inode            *inode,
 336        unsigned int            type)
 337{
 338        xfs_ioend_t             *ioend;
 339
 340        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
 341
 342        /*
 343         * Set the count to 1 initially, which will prevent an I/O
  344         * completion callback that occurs before we have started
  345         * all the I/O from calling the completion routine too early.
 346         */
 347        atomic_set(&ioend->io_remaining, 1);
 348        ioend->io_error = 0;
 349        ioend->io_list = NULL;
 350        ioend->io_type = type;
 351        ioend->io_inode = inode;
 352        ioend->io_buffer_head = NULL;
 353        ioend->io_buffer_tail = NULL;
 354        atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
 355        ioend->io_offset = 0;
 356        ioend->io_size = 0;
 357
 358        if (type == IOMAP_UNWRITTEN)
 359                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 360        else if (type == IOMAP_DELAY)
 361                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 362        else if (type == IOMAP_READ)
 363                INIT_WORK(&ioend->io_work, xfs_end_bio_read);
 364        else
 365                INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 366
 367        return ioend;
 368}
 369
 370STATIC int
 371xfs_map_blocks(
 372        struct inode            *inode,
 373        loff_t                  offset,
 374        ssize_t                 count,
 375        xfs_iomap_t             *mapp,
 376        int                     flags)
 377{
 378        int                     nmaps = 1;
 379
 380        return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 381}
 382
 383STATIC_INLINE int
 384xfs_iomap_valid(
 385        xfs_iomap_t             *iomapp,
 386        loff_t                  offset)
 387{
 388        return offset >= iomapp->iomap_offset &&
 389                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
 390}
 391
 392/*
 393 * BIO completion handler for buffered IO.
 394 */
 395STATIC void
 396xfs_end_bio(
 397        struct bio              *bio,
 398        int                     error)
 399{
 400        xfs_ioend_t             *ioend = bio->bi_private;
 401
 402        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
 403        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 404
 405        /* Toss bio and pass work off to an xfsdatad thread */
 406        bio->bi_private = NULL;
 407        bio->bi_end_io = NULL;
 408        bio_put(bio);
 409
 410        xfs_finish_ioend(ioend, 0);
 411}
 412
 413STATIC void
 414xfs_submit_ioend_bio(
 415        xfs_ioend_t     *ioend,
 416        struct bio      *bio)
 417{
 418        atomic_inc(&ioend->io_remaining);
 419        bio->bi_private = ioend;
 420        bio->bi_end_io = xfs_end_bio;
 421
 422        /*
 423         * If the I/O is beyond EOF we mark the inode dirty immediately
 424         * but don't update the inode size until I/O completion.
 425         */
 426        if (xfs_ioend_new_eof(ioend))
 427                xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 428
 429        submit_bio(WRITE, bio);
 430        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 431        bio_put(bio);
 432}
 433
 434STATIC struct bio *
 435xfs_alloc_ioend_bio(
 436        struct buffer_head      *bh)
 437{
 438        struct bio              *bio;
 439        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);
 440
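        /*
         * Editor's note: bio_alloc() may fail transiently for a large nvecs
         * under memory pressure, so retry with progressively fewer vecs;
         * a small GFP_NOIO allocation is expected to succeed eventually,
         * which bounds the loop.
         */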
 441        do {
 442                bio = bio_alloc(GFP_NOIO, nvecs);
 443                nvecs >>= 1;
 444        } while (!bio);
 445
 446        ASSERT(bio->bi_private == NULL);
 447        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 448        bio->bi_bdev = bh->b_bdev;
 449        bio_get(bio);
 450        return bio;
 451}
 452
 453STATIC void
 454xfs_start_buffer_writeback(
 455        struct buffer_head      *bh)
 456{
 457        ASSERT(buffer_mapped(bh));
 458        ASSERT(buffer_locked(bh));
 459        ASSERT(!buffer_delay(bh));
 460        ASSERT(!buffer_unwritten(bh));
 461
 462        mark_buffer_async_write(bh);
 463        set_buffer_uptodate(bh);
 464        clear_buffer_dirty(bh);
 465}
 466
 467STATIC void
 468xfs_start_page_writeback(
 469        struct page             *page,
 470        int                     clear_dirty,
 471        int                     buffers)
 472{
 473        ASSERT(PageLocked(page));
 474        ASSERT(!PageWriteback(page));
 475        if (clear_dirty)
 476                clear_page_dirty_for_io(page);
 477        set_page_writeback(page);
 478        unlock_page(page);
 479        /* If no buffers on the page are to be written, finish it here */
 480        if (!buffers)
 481                end_page_writeback(page);
 482}
 483
 484static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 485{
 486        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 487}
 488
 489/*
 490 * Submit all of the bios for all of the ioends we have saved up, covering the
 491 * initial writepage page and also any probed pages.
 492 *
 493 * Because we may have multiple ioends spanning a page, we need to start
 494 * writeback on all the buffers before we submit them for I/O. If we mark the
  495 * buffers as we submit them, then we can end up with a page that only has
  496 * some buffers marked async write, and I/O completion on those can occur
  497 * before we mark the other buffers async write.
 498 *
 499 * The end result of this is that we trip a bug in end_page_writeback() because
 500 * we call it twice for the one page as the code in end_buffer_async_write()
 501 * assumes that all buffers on the page are started at the same time.
 502 *
 503 * The fix is two passes across the ioend list - one to start writeback on the
 504 * buffer_heads, and then submit them for I/O on the second pass.
 505 */
 506STATIC void
 507xfs_submit_ioend(
 508        xfs_ioend_t             *ioend)
 509{
 510        xfs_ioend_t             *head = ioend;
 511        xfs_ioend_t             *next;
 512        struct buffer_head      *bh;
 513        struct bio              *bio;
 514        sector_t                lastblock = 0;
 515
 516        /* Pass 1 - start writeback */
 517        do {
 518                next = ioend->io_list;
 519                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 520                        xfs_start_buffer_writeback(bh);
 521                }
 522        } while ((ioend = next) != NULL);
 523
 524        /* Pass 2 - submit I/O */
 525        ioend = head;
 526        do {
 527                next = ioend->io_list;
 528                bio = NULL;
 529
 530                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 531
 532                        if (!bio) {
 533 retry:
 534                                bio = xfs_alloc_ioend_bio(bh);
 535                        } else if (bh->b_blocknr != lastblock + 1) {
 536                                xfs_submit_ioend_bio(ioend, bio);
 537                                goto retry;
 538                        }
 539
 540                        if (bio_add_buffer(bio, bh) != bh->b_size) {
 541                                xfs_submit_ioend_bio(ioend, bio);
 542                                goto retry;
 543                        }
 544
 545                        lastblock = bh->b_blocknr;
 546                }
 547                if (bio)
 548                        xfs_submit_ioend_bio(ioend, bio);
 549                xfs_finish_ioend(ioend, 0);
 550        } while ((ioend = next) != NULL);
 551}
 552
 553/*
  554 * Cancel submission of all buffer_heads so far in this ioend.
  555 * Toss the ioend too.  Only ever called for the initial page
 556 * in a writepage request, so only ever one page.
 557 */
 558STATIC void
 559xfs_cancel_ioend(
 560        xfs_ioend_t             *ioend)
 561{
 562        xfs_ioend_t             *next;
 563        struct buffer_head      *bh, *next_bh;
 564
 565        do {
 566                next = ioend->io_list;
 567                bh = ioend->io_buffer_head;
 568                do {
 569                        next_bh = bh->b_private;
 570                        clear_buffer_async_write(bh);
 571                        unlock_buffer(bh);
 572                } while ((bh = next_bh) != NULL);
 573
 574                xfs_ioend_wake(XFS_I(ioend->io_inode));
 575                mempool_free(ioend, xfs_ioend_pool);
 576        } while ((ioend = next) != NULL);
 577}
 578
 579/*
 580 * Test to see if we've been building up a completion structure for
 581 * earlier buffers -- if so, we try to append to this ioend if we
 582 * can, otherwise we finish off any current ioend and start another.
  583 * The new or extended ioend is returned via *result.
 584 */
 585STATIC void
 586xfs_add_to_ioend(
 587        struct inode            *inode,
 588        struct buffer_head      *bh,
 589        xfs_off_t               offset,
 590        unsigned int            type,
 591        xfs_ioend_t             **result,
 592        int                     need_ioend)
 593{
 594        xfs_ioend_t             *ioend = *result;
 595
 596        if (!ioend || need_ioend || type != ioend->io_type) {
 597                xfs_ioend_t     *previous = *result;
 598
 599                ioend = xfs_alloc_ioend(inode, type);
 600                ioend->io_offset = offset;
 601                ioend->io_buffer_head = bh;
 602                ioend->io_buffer_tail = bh;
 603                if (previous)
 604                        previous->io_list = ioend;
 605                *result = ioend;
 606        } else {
 607                ioend->io_buffer_tail->b_private = bh;
 608                ioend->io_buffer_tail = bh;
 609        }
 610
 611        bh->b_private = NULL;
 612        ioend->io_size += bh->b_size;
 613}
 614
 615STATIC void
 616xfs_map_buffer(
 617        struct buffer_head      *bh,
 618        xfs_iomap_t             *mp,
 619        xfs_off_t               offset,
 620        uint                    block_bits)
 621{
 622        sector_t                bn;
 623
 624        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
 625
 626        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
 627              ((offset - mp->iomap_offset) >> block_bits);
 628
 629        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
 630
 631        bh->b_blocknr = bn;
 632        set_buffer_mapped(bh);
 633}
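/*
 * Editor's worked example (illustrative numbers): iomap_bn is expressed in
 * 512-byte basic blocks (BBSHIFT == 9).  With 4k filesystem blocks
 * (block_bits == 12), iomap_bn = 8000 maps to filesystem block
 * 8000 >> (12 - 9) = 1000; if the buffer sits 8192 bytes into the mapping,
 * (offset - iomap_offset) >> 12 adds 2, giving bh->b_blocknr = 1002.
 */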
 634
 635STATIC void
 636xfs_map_at_offset(
 637        struct buffer_head      *bh,
 638        loff_t                  offset,
 639        int                     block_bits,
 640        xfs_iomap_t             *iomapp)
 641{
 642        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
 643        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
 644
 645        lock_buffer(bh);
 646        xfs_map_buffer(bh, iomapp, offset, block_bits);
 647        bh->b_bdev = iomapp->iomap_target->bt_bdev;
 648        set_buffer_mapped(bh);
 649        clear_buffer_delay(bh);
 650        clear_buffer_unwritten(bh);
 651}
 652
 653/*
 654 * Look for a page at index that is suitable for clustering.
 655 */
 656STATIC unsigned int
 657xfs_probe_page(
 658        struct page             *page,
 659        unsigned int            pg_offset,
 660        int                     mapped)
 661{
 662        int                     ret = 0;
 663
 664        if (PageWriteback(page))
 665                return 0;
 666
 667        if (page->mapping && PageDirty(page)) {
 668                if (page_has_buffers(page)) {
 669                        struct buffer_head      *bh, *head;
 670
 671                        bh = head = page_buffers(page);
 672                        do {
 673                                if (!buffer_uptodate(bh))
 674                                        break;
 675                                if (mapped != buffer_mapped(bh))
 676                                        break;
 677                                ret += bh->b_size;
 678                                if (ret >= pg_offset)
 679                                        break;
 680                        } while ((bh = bh->b_this_page) != head);
 681                } else
 682                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
 683        }
 684
 685        return ret;
 686}
 687
 688STATIC size_t
 689xfs_probe_cluster(
 690        struct inode            *inode,
 691        struct page             *startpage,
 692        struct buffer_head      *bh,
 693        struct buffer_head      *head,
 694        int                     mapped)
 695{
 696        struct pagevec          pvec;
 697        pgoff_t                 tindex, tlast, tloff;
 698        size_t                  total = 0;
 699        int                     done = 0, i;
 700
 701        /* First sum forwards in this page */
 702        do {
 703                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
 704                        return total;
 705                total += bh->b_size;
 706        } while ((bh = bh->b_this_page) != head);
 707
 708        /* if we reached the end of the page, sum forwards in following pages */
 709        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 710        tindex = startpage->index + 1;
 711
 712        /* Prune this back to avoid pathological behavior */
 713        tloff = min(tlast, startpage->index + 64);
 714
 715        pagevec_init(&pvec, 0);
 716        while (!done && tindex <= tloff) {
 717                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
 718
 719                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
 720                        break;
 721
 722                for (i = 0; i < pagevec_count(&pvec); i++) {
 723                        struct page *page = pvec.pages[i];
 724                        size_t pg_offset, pg_len = 0;
 725
 726                        if (tindex == tlast) {
 727                                pg_offset =
 728                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
 729                                if (!pg_offset) {
 730                                        done = 1;
 731                                        break;
 732                                }
 733                        } else
 734                                pg_offset = PAGE_CACHE_SIZE;
 735
 736                        if (page->index == tindex && trylock_page(page)) {
 737                                pg_len = xfs_probe_page(page, pg_offset, mapped);
 738                                unlock_page(page);
 739                        }
 740
 741                        if (!pg_len) {
 742                                done = 1;
 743                                break;
 744                        }
 745
 746                        total += pg_len;
 747                        tindex++;
 748                }
 749
 750                pagevec_release(&pvec);
 751                cond_resched();
 752        }
 753
 754        return total;
 755}
 756
 757/*
 758 * Test if a given page is suitable for writing as part of an unwritten
 759 * or delayed allocate extent.
 760 */
 761STATIC int
 762xfs_is_delayed_page(
 763        struct page             *page,
 764        unsigned int            type)
 765{
 766        if (PageWriteback(page))
 767                return 0;
 768
 769        if (page->mapping && page_has_buffers(page)) {
 770                struct buffer_head      *bh, *head;
 771                int                     acceptable = 0;
 772
 773                bh = head = page_buffers(page);
 774                do {
 775                        if (buffer_unwritten(bh))
 776                                acceptable = (type == IOMAP_UNWRITTEN);
 777                        else if (buffer_delay(bh))
 778                                acceptable = (type == IOMAP_DELAY);
 779                        else if (buffer_dirty(bh) && buffer_mapped(bh))
 780                                acceptable = (type == IOMAP_NEW);
 781                        else
 782                                break;
 783                } while ((bh = bh->b_this_page) != head);
 784
 785                if (acceptable)
 786                        return 1;
 787        }
 788
 789        return 0;
 790}
 791
 792/*
  793 * Allocate & map buffers for page given the extent map, then write it out.
  794 * Except for the original page of a writepage, this is called on
  795 * delalloc/unwritten pages only; for the original page it is possible
 796 * that the page has no mapping at all.
 797 */
 798STATIC int
 799xfs_convert_page(
 800        struct inode            *inode,
 801        struct page             *page,
 802        loff_t                  tindex,
 803        xfs_iomap_t             *mp,
 804        xfs_ioend_t             **ioendp,
 805        struct writeback_control *wbc,
 806        int                     startio,
 807        int                     all_bh)
 808{
 809        struct buffer_head      *bh, *head;
 810        xfs_off_t               end_offset;
 811        unsigned long           p_offset;
 812        unsigned int            type;
 813        int                     bbits = inode->i_blkbits;
 814        int                     len, page_dirty;
 815        int                     count = 0, done = 0, uptodate = 1;
 816        xfs_off_t               offset = page_offset(page);
 817
 818        if (page->index != tindex)
 819                goto fail;
 820        if (!trylock_page(page))
 821                goto fail;
 822        if (PageWriteback(page))
 823                goto fail_unlock_page;
 824        if (page->mapping != inode->i_mapping)
 825                goto fail_unlock_page;
 826        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
 827                goto fail_unlock_page;
 828
 829        /*
 830         * page_dirty is initially a count of buffers on the page before
 831         * EOF and is decremented as we move each into a cleanable state.
 832         *
 833         * Derivation:
 834         *
 835         * End offset is the highest offset that this page should represent.
 836         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
 837         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
 838         * hence give us the correct page_dirty count. On any other page,
 839         * it will be zero and in that case we need page_dirty to be the
 840         * count of buffers on the page.
 841         */
 842        end_offset = min_t(unsigned long long,
 843                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
 844                        i_size_read(inode));
 845
 846        len = 1 << inode->i_blkbits;
 847        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
 848                                        PAGE_CACHE_SIZE);
 849        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 850        page_dirty = p_offset / len;
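        /*
         * Editor's worked example (illustrative numbers): with 4k pages and
         * 1k blocks (len = 1024), a 10000-byte file puts EOF on page index 2,
         * where end_offset = 10000 and (10000 & 4095) = 1808, rounded up to
         * 2048, so page_dirty = 2.  On any earlier page the mask yields 0,
         * p_offset becomes PAGE_CACHE_SIZE and page_dirty = 4096/1024 = 4.
         */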
 851
 852        bh = head = page_buffers(page);
 853        do {
 854                if (offset >= end_offset)
 855                        break;
 856                if (!buffer_uptodate(bh))
 857                        uptodate = 0;
 858                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
 859                        done = 1;
 860                        continue;
 861                }
 862
 863                if (buffer_unwritten(bh) || buffer_delay(bh)) {
 864                        if (buffer_unwritten(bh))
 865                                type = IOMAP_UNWRITTEN;
 866                        else
 867                                type = IOMAP_DELAY;
 868
 869                        if (!xfs_iomap_valid(mp, offset)) {
 870                                done = 1;
 871                                continue;
 872                        }
 873
 874                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
 875                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
 876
 877                        xfs_map_at_offset(bh, offset, bbits, mp);
 878                        if (startio) {
 879                                xfs_add_to_ioend(inode, bh, offset,
 880                                                type, ioendp, done);
 881                        } else {
 882                                set_buffer_dirty(bh);
 883                                unlock_buffer(bh);
 884                                mark_buffer_dirty(bh);
 885                        }
 886                        page_dirty--;
 887                        count++;
 888                } else {
 889                        type = IOMAP_NEW;
 890                        if (buffer_mapped(bh) && all_bh && startio) {
 891                                lock_buffer(bh);
 892                                xfs_add_to_ioend(inode, bh, offset,
 893                                                type, ioendp, done);
 894                                count++;
 895                                page_dirty--;
 896                        } else {
 897                                done = 1;
 898                        }
 899                }
 900        } while (offset += len, (bh = bh->b_this_page) != head);
 901
 902        if (uptodate && bh == head)
 903                SetPageUptodate(page);
 904
 905        if (startio) {
 906                if (count) {
 907                        struct backing_dev_info *bdi;
 908
 909                        bdi = inode->i_mapping->backing_dev_info;
 910                        wbc->nr_to_write--;
 911                        if (bdi_write_congested(bdi)) {
 912                                wbc->encountered_congestion = 1;
 913                                done = 1;
 914                        } else if (wbc->nr_to_write <= 0) {
 915                                done = 1;
 916                        }
 917                }
 918                xfs_start_page_writeback(page, !page_dirty, count);
 919        }
 920
 921        return done;
 922 fail_unlock_page:
 923        unlock_page(page);
 924 fail:
 925        return 1;
 926}
 927
 928/*
 929 * Convert & write out a cluster of pages in the same extent as defined
  930 * by iomapp and following the start page.
 931 */
 932STATIC void
 933xfs_cluster_write(
 934        struct inode            *inode,
 935        pgoff_t                 tindex,
 936        xfs_iomap_t             *iomapp,
 937        xfs_ioend_t             **ioendp,
 938        struct writeback_control *wbc,
 939        int                     startio,
 940        int                     all_bh,
 941        pgoff_t                 tlast)
 942{
 943        struct pagevec          pvec;
 944        int                     done = 0, i;
 945
 946        pagevec_init(&pvec, 0);
 947        while (!done && tindex <= tlast) {
 948                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
 949
 950                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
 951                        break;
 952
 953                for (i = 0; i < pagevec_count(&pvec); i++) {
 954                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
 955                                        iomapp, ioendp, wbc, startio, all_bh);
 956                        if (done)
 957                                break;
 958                }
 959
 960                pagevec_release(&pvec);
 961                cond_resched();
 962        }
 963}
 964
 965/*
  966 * Calling this without startio set means we are being asked to make a dirty
  967 * page ready for freeing its buffers.  When called with startio set then
  968 * we are coming from writepage.
  969 *
  970 * When called with startio set it is important that we write the WHOLE
  971 * page if possible.
  972 * The buffer head states cannot tell us which blocks, if any, are dirty
  973 * due to mmap writes, and therefore buffer uptodate state is only valid
  974 * if the page itself isn't completely uptodate.  Some layers may clear
  975 * the page dirty flag prior to calling writepage, under the assumption
  976 * that the entire page will be written out; by not writing out the
  977 * whole page, the page can be reused before all valid dirty data has
  978 * been written out.  Note: in the case of a page that has been dirtied
  979 * by an mmap write but only partially set up by block_prepare_write,
  980 * the buffer head states will not agree; only the buffers set up by
  981 * BPW/BCW have valid state, and thus the whole page must be written out.
 982 */
 983
 984STATIC int
 985xfs_page_state_convert(
 986        struct inode    *inode,
 987        struct page     *page,
 988        struct writeback_control *wbc,
 989        int             startio,
 990        int             unmapped) /* also implies page uptodate */
 991{
 992        struct buffer_head      *bh, *head;
 993        xfs_iomap_t             iomap;
 994        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
 995        loff_t                  offset;
 996        unsigned long           p_offset = 0;
 997        unsigned int            type;
 998        __uint64_t              end_offset;
 999        pgoff_t                 end_index, last_index, tlast;
1000        ssize_t                 size, len;
1001        int                     flags, err, iomap_valid = 0, uptodate = 1;
1002        int                     page_dirty, count = 0;
1003        int                     trylock = 0;
1004        int                     all_bh = unmapped;
1005
1006        if (startio) {
1007                if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
1008                        trylock |= BMAPI_TRYLOCK;
1009        }
1010
1011        /* Is this page beyond the end of the file? */
1012        offset = i_size_read(inode);
1013        end_index = offset >> PAGE_CACHE_SHIFT;
1014        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
1015        if (page->index >= end_index) {
1016                if ((page->index >= end_index + 1) ||
1017                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
1018                        if (startio)
1019                                unlock_page(page);
1020                        return 0;
1021                }
1022        }
1023
1024        /*
1025         * page_dirty is initially a count of buffers on the page before
1026         * EOF and is decremented as we move each into a cleanable state.
1027         *
1028         * Derivation:
1029         *
1030         * End offset is the highest offset that this page should represent.
1031         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
1032         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
1033         * hence give us the correct page_dirty count. On any other page,
1034         * it will be zero and in that case we need page_dirty to be the
1035         * count of buffers on the page.
1036         */
1037        end_offset = min_t(unsigned long long,
1038                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
1039        len = 1 << inode->i_blkbits;
1040        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
1041                                        PAGE_CACHE_SIZE);
1042        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
1043        page_dirty = p_offset / len;
1044
1045        bh = head = page_buffers(page);
1046        offset = page_offset(page);
1047        flags = BMAPI_READ;
1048        type = IOMAP_NEW;
1049
1050        /* TODO: cleanup count and page_dirty */
1051
1052        do {
1053                if (offset >= end_offset)
1054                        break;
1055                if (!buffer_uptodate(bh))
1056                        uptodate = 0;
1057                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
1058                        /*
1059                         * the iomap is actually still valid, but the ioend
1060                         * isn't.  shouldn't happen too often.
1061                         */
1062                        iomap_valid = 0;
1063                        continue;
1064                }
1065
1066                if (iomap_valid)
1067                        iomap_valid = xfs_iomap_valid(&iomap, offset);
1068
1069                /*
1070                 * First case, map an unwritten extent and prepare for
1071                 * extent state conversion transaction on completion.
1072                 *
1073                 * Second case, allocate space for a delalloc buffer.
1074                 * We can return EAGAIN here in the release page case.
1075                 *
1076                 * Third case, an unmapped buffer was found, and we are
1077                 * in a path where we need to write the whole page out.
1078                 */
1079                if (buffer_unwritten(bh) || buffer_delay(bh) ||
1080                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
1081                     !buffer_mapped(bh) && (unmapped || startio))) {
1082                        int new_ioend = 0;
1083
1084                        /*
1085                         * Make sure we don't use a read-only iomap
1086                         */
1087                        if (flags == BMAPI_READ)
1088                                iomap_valid = 0;
1089
1090                        if (buffer_unwritten(bh)) {
1091                                type = IOMAP_UNWRITTEN;
1092                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
1093                        } else if (buffer_delay(bh)) {
1094                                type = IOMAP_DELAY;
1095                                flags = BMAPI_ALLOCATE | trylock;
1096                        } else {
1097                                type = IOMAP_NEW;
1098                                flags = BMAPI_WRITE | BMAPI_MMAP;
1099                        }
1100
1101                        if (!iomap_valid) {
1102                                /*
1103                                 * if we didn't have a valid mapping then we
1104                                 * need to ensure that we put the new mapping
1105                                 * in a new ioend structure. This needs to be
1106                                 * done to ensure that the ioends correctly
1107                                 * reflect the block mappings at io completion
1108                                 * for unwritten extent conversion.
1109                                 */
1110                                new_ioend = 1;
1111                                if (type == IOMAP_NEW) {
1112                                        size = xfs_probe_cluster(inode,
1113                                                        page, bh, head, 0);
1114                                } else {
1115                                        size = len;
1116                                }
1117
1118                                err = xfs_map_blocks(inode, offset, size,
1119                                                &iomap, flags);
1120                                if (err)
1121                                        goto error;
1122                                iomap_valid = xfs_iomap_valid(&iomap, offset);
1123                        }
1124                        if (iomap_valid) {
1125                                xfs_map_at_offset(bh, offset,
1126                                                inode->i_blkbits, &iomap);
1127                                if (startio) {
1128                                        xfs_add_to_ioend(inode, bh, offset,
1129                                                        type, &ioend,
1130                                                        new_ioend);
1131                                } else {
1132                                        set_buffer_dirty(bh);
1133                                        unlock_buffer(bh);
1134                                        mark_buffer_dirty(bh);
1135                                }
1136                                page_dirty--;
1137                                count++;
1138                        }
1139                } else if (buffer_uptodate(bh) && startio) {
1140                        /*
1141                         * we got here because the buffer is already mapped.
1142                         * That means it must already have extents allocated
1143                         * underneath it. Map the extent by reading it.
1144                         */
1145                        if (!iomap_valid || flags != BMAPI_READ) {
1146                                flags = BMAPI_READ;
1147                                size = xfs_probe_cluster(inode, page, bh,
1148                                                                head, 1);
1149                                err = xfs_map_blocks(inode, offset, size,
1150                                                &iomap, flags);
1151                                if (err)
1152                                        goto error;
1153                                iomap_valid = xfs_iomap_valid(&iomap, offset);
1154                        }
1155
1156                        /*
1157                         * We set the type to IOMAP_NEW in case we are doing a
1158                         * small write at EOF that is extending the file but
1159                         * without needing an allocation. We need to update the
1160                         * file size on I/O completion in this case so it is
1161                         * the same case as having just allocated a new extent
1162                         * that we are writing into for the first time.
1163                         */
1164                        type = IOMAP_NEW;
1165                        if (trylock_buffer(bh)) {
1166                                ASSERT(buffer_mapped(bh));
1167                                if (iomap_valid)
1168                                        all_bh = 1;
1169                                xfs_add_to_ioend(inode, bh, offset, type,
1170                                                &ioend, !iomap_valid);
1171                                page_dirty--;
1172                                count++;
1173                        } else {
1174                                iomap_valid = 0;
1175                        }
1176                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
1177                           (unmapped || startio)) {
1178                        iomap_valid = 0;
1179                }
1180
1181                if (!iohead)
1182                        iohead = ioend;
1183
1184        } while (offset += len, ((bh = bh->b_this_page) != head));
1185
1186        if (uptodate && bh == head)
1187                SetPageUptodate(page);
1188
1189        if (startio)
1190                xfs_start_page_writeback(page, 1, count);
1191
1192        if (ioend && iomap_valid) {
1193                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
1194                                        PAGE_CACHE_SHIFT;
1195                tlast = min_t(pgoff_t, offset, last_index);
1196                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
1197                                        wbc, startio, all_bh, tlast);
1198        }
1199
1200        if (iohead)
1201                xfs_submit_ioend(iohead);
1202
1203        return page_dirty;
1204
1205error:
1206        if (iohead)
1207                xfs_cancel_ioend(iohead);
1208
1209        /*
1210         * If it's delalloc and we have nowhere to put it,
1211         * throw it away, unless the lower layers told
1212         * us to try again.
1213         */
1214        if (err != -EAGAIN) {
1215                if (!unmapped)
1216                        block_invalidatepage(page, 0);
1217                ClearPageUptodate(page);
1218        }
1219        return err;
1220}
1221
1222/*
1223 * writepage: Called from one of two places:
1224 *
1225 * 1. we are flushing a delalloc buffer head.
1226 *
1227 * 2. we are writing out a dirty page. Typically the page dirty
 1228 *    state is cleared before we get here. In this case it is
1229 *    conceivable we have no buffer heads.
1230 *
1231 * For delalloc space on the page we need to allocate space and
1232 * flush it. For unmapped buffer heads on the page we should
1233 * allocate space if the page is uptodate. For any other dirty
1234 * buffer heads on the page we should flush them.
1235 *
1236 * If we detect that a transaction would be required to flush
1237 * the page, we have to check the process flags first, if we
1238 * are already in a transaction or disk I/O during allocations
1239 * is off, we need to fail the writepage and redirty the page.
1240 */
1241
1242STATIC int
1243xfs_vm_writepage(
1244        struct page             *page,
1245        struct writeback_control *wbc)
1246{
1247        int                     error;
1248        int                     need_trans;
1249        int                     delalloc, unmapped, unwritten;
1250        struct inode            *inode = page->mapping->host;
1251
1252        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
1253
1254        /*
1255         * We need a transaction if:
1256         *  1. There are delalloc buffers on the page
1257         *  2. The page is uptodate and we have unmapped buffers
1258         *  3. The page is uptodate and we have no buffers
1259         *  4. There are unwritten buffers on the page
1260         */
1261
1262        if (!page_has_buffers(page)) {
1263                unmapped = 1;
1264                need_trans = 1;
1265        } else {
1266                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1267                if (!PageUptodate(page))
1268                        unmapped = 0;
1269                need_trans = delalloc + unmapped + unwritten;
1270        }
1271
1272        /*
1273         * If we need a transaction and the process flags say
1274         * we are already in a transaction, or no IO is allowed
1275         * then mark the page dirty again and leave the page
1276         * as is.
1277         */
1278        if (current_test_flags(PF_FSTRANS) && need_trans)
1279                goto out_fail;
1280
1281        /*
1282         * Delay hooking up buffer heads until we have
1283         * made our go/no-go decision.
1284         */
1285        if (!page_has_buffers(page))
1286                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1287
1288
1289        /*
1290         *  VM calculation for nr_to_write seems off.  Bump it way
1291         *  up, this gets simple streaming writes zippy again.
1292         *  To be reviewed again after Jens' writeback changes.
1293         */
1294        wbc->nr_to_write *= 4;
1295
1296        /*
1297         * Convert delayed allocate, unwritten or unmapped space
1298         * to real space and flush out to disk.
1299         */
1300        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1301        if (error == -EAGAIN)
1302                goto out_fail;
1303        if (unlikely(error < 0))
1304                goto out_unlock;
1305
1306        return 0;
1307
1308out_fail:
1309        redirty_page_for_writepage(wbc, page);
1310        unlock_page(page);
1311        return 0;
1312out_unlock:
1313        unlock_page(page);
1314        return error;
1315}
1316
1317STATIC int
1318xfs_vm_writepages(
1319        struct address_space    *mapping,
1320        struct writeback_control *wbc)
1321{
1322        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1323        return generic_writepages(mapping, wbc);
1324}
1325
1326/*
1327 * Called to move a page into cleanable state - and from there
1328 * to be released. Possibly the page is already clean. We always
1329 * have buffer heads in this call.
1330 *
1331 * Returns 0 if the page is ok to release, 1 otherwise.
1332 *
1333 * Possible scenarios are:
1334 *
1335 * 1. We are being called to release a page which has been written
 1336 *    to via regular I/O. Buffer heads will be dirty and possibly
 1337 *    delalloc. If there are no delalloc buffer heads in this case
 1338 *    then we can just return zero.
1339 *
 1340 * 2. We are called to release a page which has been written via
 1341 *    mmap; all we need to do is ensure there is no delalloc
 1342 *    state in the buffer heads. If there is none, we can let the
 1343 *    caller free them and we should come back later via writepage.
1344 */
1345STATIC int
1346xfs_vm_releasepage(
1347        struct page             *page,
1348        gfp_t                   gfp_mask)
1349{
1350        struct inode            *inode = page->mapping->host;
1351        int                     dirty, delalloc, unmapped, unwritten;
1352        struct writeback_control wbc = {
1353                .sync_mode = WB_SYNC_ALL,
1354                .nr_to_write = 1,
1355        };
1356
1357        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
1358
1359        if (!page_has_buffers(page))
1360                return 0;
1361
1362        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1363        if (!delalloc && !unwritten)
1364                goto free_buffers;
1365
1366        if (!(gfp_mask & __GFP_FS))
1367                return 0;
1368
1369        /* If we are already inside a transaction or the thread cannot
1370         * do I/O, we cannot release this page.
1371         */
1372        if (current_test_flags(PF_FSTRANS))
1373                return 0;
1374
1375        /*
1376         * Convert delalloc space to real space, do not flush the
1377         * data out to disk, that will be done by the caller.
1378         * Never need to allocate space here - we will always
1379         * come back to writepage in that case.
1380         */
1381        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1382        if (dirty == 0 && !unwritten)
1383                goto free_buffers;
1384        return 0;
1385
1386free_buffers:
1387        return try_to_free_buffers(page);
1388}
1389
1390STATIC int
1391__xfs_get_blocks(
1392        struct inode            *inode,
1393        sector_t                iblock,
1394        struct buffer_head      *bh_result,
1395        int                     create,
1396        int                     direct,
1397        bmapi_flags_t           flags)
1398{
1399        xfs_iomap_t             iomap;
1400        xfs_off_t               offset;
1401        ssize_t                 size;
1402        int                     niomap = 1;
1403        int                     error;
1404
1405        offset = (xfs_off_t)iblock << inode->i_blkbits;
1406        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1407        size = bh_result->b_size;
1408
1409        if (!create && direct && offset >= i_size_read(inode))
1410                return 0;
1411
1412        error = xfs_iomap(XFS_I(inode), offset, size,
1413                             create ? flags : BMAPI_READ, &iomap, &niomap);
1414        if (error)
1415                return -error;
1416        if (niomap == 0)
1417                return 0;
1418
1419        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
1420                /*
1421                 * For unwritten extents do not report a disk address on
1422                 * the read case (treat as if we're reading into a hole).
1423                 */
1424                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1425                        xfs_map_buffer(bh_result, &iomap, offset,
1426                                       inode->i_blkbits);
1427                }
1428                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1429                        if (direct)
1430                                bh_result->b_private = inode;
1431                        set_buffer_unwritten(bh_result);
1432                }
1433        }
1434
1435        /*
 1436         * If this is a realtime file, data may be on a different device
1437         * to that pointed to from the buffer_head b_bdev currently.
1438         */
1439        bh_result->b_bdev = iomap.iomap_target->bt_bdev;
1440
1441        /*
1442         * If we previously allocated a block out beyond eof and we are now
1443         * coming back to use it then we will need to flag it as new even if it
1444         * has a disk address.
1445         *
1446         * With sub-block writes into unwritten extents we also need to mark
1447         * the buffer as new so that the unwritten parts of the buffer gets
1448         * correctly zeroed.
1449         */
1450        if (create &&
1451            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1452             (offset >= i_size_read(inode)) ||
1453             (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
1454                set_buffer_new(bh_result);
1455
1456        if (iomap.iomap_flags & IOMAP_DELAY) {
1457                BUG_ON(direct);
1458                if (create) {
1459                        set_buffer_uptodate(bh_result);
1460                        set_buffer_mapped(bh_result);
1461                        set_buffer_delay(bh_result);
1462                }
1463        }
1464
1465        if (direct || size > (1 << inode->i_blkbits)) {
1466                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
1467                offset = min_t(xfs_off_t,
1468                                iomap.iomap_bsize - iomap.iomap_delta, size);
1469                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
1470        }
1471
1472        return 0;
1473}
1474
1475int
1476xfs_get_blocks(
1477        struct inode            *inode,
1478        sector_t                iblock,
1479        struct buffer_head      *bh_result,
1480        int                     create)
1481{
1482        return __xfs_get_blocks(inode, iblock,
1483                                bh_result, create, 0, BMAPI_WRITE);
1484}
1485
1486STATIC int
1487xfs_get_blocks_direct(
1488        struct inode            *inode,
1489        sector_t                iblock,
1490        struct buffer_head      *bh_result,
1491        int                     create)
1492{
1493        return __xfs_get_blocks(inode, iblock,
1494                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1495}
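
/*
 * Of the two thin wrappers above, xfs_get_blocks is the variant handed to
 * the buffered I/O helpers later in this file (block_write_begin,
 * mpage_readpage/mpage_readpages and generic_block_bmap), while
 * xfs_get_blocks_direct is only passed to the blockdev_direct_IO variants
 * in xfs_vm_direct_IO; the extra "direct" argument and the BMAPI_DIRECT
 * flag are what distinguish the two paths inside __xfs_get_blocks.
 */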
1496
1497STATIC void
1498xfs_end_io_direct(
1499        struct kiocb    *iocb,
1500        loff_t          offset,
1501        ssize_t         size,
1502        void            *private)
1503{
1504        xfs_ioend_t     *ioend = iocb->private;
1505
1506        /*
1507         * Non-NULL private data means we need to issue a transaction to
1508         * convert a range from unwritten to written extents.  This needs
1509         * to happen from process context but aio+dio I/O completion
1510         * happens from irq context so we need to defer it to a workqueue.
1511         * This is not necessary for synchronous direct I/O, but we do
1512         * it anyway to keep the code uniform and simpler.
1513         *
1514         * Well, if only it were that simple. Because synchronous direct I/O
1515         * requires extent conversion to occur *before* we return to userspace,
1516         * we have to wait for extent conversion to complete. Look at the
1517         * iocb that has been passed to us to determine if this is AIO or
1518         * not. If it is synchronous, tell xfs_finish_ioend() to kick the
1519         * workqueue and wait for it to complete.
1520         *
1521         * The core direct I/O code might be changed to always call the
1522         * completion handler in the future, in which case all this can
1523         * go away.
1524         */
1525        ioend->io_offset = offset;
1526        ioend->io_size = size;
1527        if (ioend->io_type == IOMAP_READ) {
1528                xfs_finish_ioend(ioend, 0);
1529        } else if (private && size > 0) {
1530                xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
1531        } else {
1532                /*
1533                 * A direct I/O write ioend starts its life in the unwritten
1534                 * state in case it maps an unwritten extent.  This write
1535                 * didn't map an unwritten extent, so switch its completion
1536                 * handler.
1537                 */
1538                INIT_WORK(&ioend->io_work, xfs_end_bio_written);
1539                xfs_finish_ioend(ioend, 0);
1540        }
1541
1542        /*
1543         * blockdev_direct_IO can return an error even after the I/O
1544         * completion handler was called.  Thus we need to protect
1545         * against double-freeing.
1546         */
1547        iocb->private = NULL;
1548}
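
/*
 * A minimal summary of the three completion cases handled above:
 *
 *	IOMAP_READ ioend	- just queue the completion work
 *	unwritten write		- queue the conversion work, and (via
 *				  is_sync_kiocb()) wait for it if the iocb
 *				  is synchronous
 *	other writes		- repoint io_work at xfs_end_bio_written
 *				  and queue it without waiting
 */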
1549
1550STATIC ssize_t
1551xfs_vm_direct_IO(
1552        int                     rw,
1553        struct kiocb            *iocb,
1554        const struct iovec      *iov,
1555        loff_t                  offset,
1556        unsigned long           nr_segs)
1557{
1558        struct file     *file = iocb->ki_filp;
1559        struct inode    *inode = file->f_mapping->host;
1560        struct block_device *bdev;
1561        ssize_t         ret;
1562
1563        bdev = xfs_find_bdev_for_inode(XFS_I(inode));
1564
1565        if (rw == WRITE) {
1566                iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
1567                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1568                        bdev, iov, offset, nr_segs,
1569                        xfs_get_blocks_direct,
1570                        xfs_end_io_direct);
1571        } else {
1572                iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
1573                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
1574                        bdev, iov, offset, nr_segs,
1575                        xfs_get_blocks_direct,
1576                        xfs_end_io_direct);
1577        }
1578
1579        if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1580                xfs_destroy_ioend(iocb->private);
1581        return ret;
1582}
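
/*
 * Note on ioend lifetime in xfs_vm_direct_IO: the ioend stashed in
 * iocb->private is normally consumed by xfs_end_io_direct(), which clears
 * the pointer once it has handed the completion off.  If blockdev_direct_IO
 * returns without the iocb having been queued and the completion handler
 * never ran, iocb->private is still set, and the xfs_destroy_ioend() call
 * at the bottom reclaims it instead.
 */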
1583
1584STATIC int
1585xfs_vm_write_begin(
1586        struct file             *file,
1587        struct address_space    *mapping,
1588        loff_t                  pos,
1589        unsigned                len,
1590        unsigned                flags,
1591        struct page             **pagep,
1592        void                    **fsdata)
1593{
1594        *pagep = NULL;
1595        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1596                                                                xfs_get_blocks);
1597}
1598
1599STATIC sector_t
1600xfs_vm_bmap(
1601        struct address_space    *mapping,
1602        sector_t                block)
1603{
1604        struct inode            *inode = (struct inode *)mapping->host;
1605        struct xfs_inode        *ip = XFS_I(inode);
1606
1607        xfs_itrace_entry(XFS_I(inode));
1608        xfs_ilock(ip, XFS_IOLOCK_SHARED);
1609        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
1610        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1611        return generic_block_bmap(mapping, block, xfs_get_blocks);
1612}
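
/*
 * The flush in xfs_vm_bmap matters because ->bmap (used by the FIBMAP
 * ioctl, for example) can only usefully report on-disk block numbers:
 * delayed-allocation blocks have no disk address until they are written
 * back, so the dirty pages are pushed out first and generic_block_bmap()
 * then sees fully allocated mappings via xfs_get_blocks().
 */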
1613
1614STATIC int
1615xfs_vm_readpage(
1616        struct file             *unused,
1617        struct page             *page)
1618{
1619        return mpage_readpage(page, xfs_get_blocks);
1620}
1621
1622STATIC int
1623xfs_vm_readpages(
1624        struct file             *unused,
1625        struct address_space    *mapping,
1626        struct list_head        *pages,
1627        unsigned                nr_pages)
1628{
1629        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1630}
1631
1632STATIC void
1633xfs_vm_invalidatepage(
1634        struct page             *page,
1635        unsigned long           offset)
1636{
1637        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1638                        page->mapping->host, page, offset);
1639        block_invalidatepage(page, offset);
1640}
1641
1642const struct address_space_operations xfs_address_space_operations = {
1643        .readpage               = xfs_vm_readpage,
1644        .readpages              = xfs_vm_readpages,
1645        .writepage              = xfs_vm_writepage,
1646        .writepages             = xfs_vm_writepages,
1647        .sync_page              = block_sync_page,
1648        .releasepage            = xfs_vm_releasepage,
1649        .invalidatepage         = xfs_vm_invalidatepage,
1650        .write_begin            = xfs_vm_write_begin,
1651        .write_end              = generic_write_end,
1652        .bmap                   = xfs_vm_bmap,
1653        .direct_IO              = xfs_vm_direct_IO,
1654        .migratepage            = buffer_migrate_page,
1655        .is_partially_uptodate  = block_is_partially_uptodate,
1656        .error_remove_page      = generic_error_remove_page,
1657};
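
/*
 * This table is hooked up to regular file inodes by the linux-2.6 glue
 * code (see xfs_setup_inode() in xfs_iops.c), roughly along the lines of:
 *
 *	inode->i_mapping->a_ops = &xfs_address_space_operations;
 *
 * after which the generic page cache and direct I/O paths call back into
 * the xfs_vm_* handlers defined in this file.
 */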
1658