/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002    Andrew Morton
 *              Initial version
 * 27Jun2002    axboe@suse.de
 *              use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
                page_endio(page, bio_data_dir(bio), bio->bi_error);
        }

        bio_put(bio);
}

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
        bio->bi_end_io = mpage_end_io;
        guard_bio_eod(rw, bio);
        submit_bio(rw, bio);
        return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
                sector_t first_sector, int nr_vecs,
                gfp_t gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = first_sector;
        }
        return bio;
}

/*
 * Support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bh, *head;
        int block = 0;

        if (!page_has_buffers(page)) {
                /*
                 * don't make any buffers if there is only one buffer on
                 * the page and the page just needs to be set up to date
                 */
                if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
                    buffer_uptodate(bh)) {
                        SetPageUptodate(page);
                        return;
                }
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        }
        head = page_buffers(page);
        page_bh = head;
        do {
                if (block == page_block) {
                        page_bh->b_state = bh->b_state;
                        page_bh->b_bdev = bh->b_bdev;
                        page_bh->b_blocknr = bh->b_blocknr;
                        break;
                }
                page_bh = page_bh->b_this_page;
                block++;
        } while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for I/O
 * whenever the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
                unsigned long *first_logical_block, get_block_t get_block,
                gfp_t gfp)
{
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_hole = blocks_per_page;
        struct block_device *bdev = NULL;
        int length;
        int fully_mapped = 1;
        unsigned nblocks;
        unsigned relative_block;

        if (page_has_buffers(page))
                goto confused;

        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = block_in_file + nr_pages * blocks_per_page;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
                last_block = last_block_in_file;
        page_block = 0;

        /*
         * Map blocks using the result from the previous get_blocks call first.
         */
        nblocks = map_bh->b_size >> blkbits;
        if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
                        block_in_file < (*first_logical_block + nblocks)) {
                unsigned map_offset = block_in_file - *first_logical_block;
                unsigned last = nblocks - map_offset;

                for (relative_block = 0; ; relative_block++) {
                        if (relative_block == last) {
                                clear_buffer_mapped(map_bh);
                                break;
                        }
                        if (page_block == blocks_per_page)
                                break;
                        blocks[page_block] = map_bh->b_blocknr + map_offset +
                                                relative_block;
                        page_block++;
                        block_in_file++;
                }
                bdev = map_bh->b_bdev;
        }

        /*
         * Then do more get_blocks calls until we are done with this page.
         */
        map_bh->b_page = page;
        while (page_block < blocks_per_page) {
                map_bh->b_state = 0;
                map_bh->b_size = 0;

                if (block_in_file < last_block) {
                        map_bh->b_size = (last_block-block_in_file) << blkbits;
                        if (get_block(inode, block_in_file, map_bh, 0))
                                goto confused;
                        *first_logical_block = block_in_file;
                }

                if (!buffer_mapped(map_bh)) {
                        fully_mapped = 0;
                        if (first_hole == blocks_per_page)
                                first_hole = page_block;
                        page_block++;
                        block_in_file++;
                        continue;
                }

                /* some filesystems will copy data into the page during
                 * the get_block call, in which case we don't want to
                 * read it again.  map_buffer_to_page copies the data
                 * we just collected from get_block into the page's buffers
                 * so readpage doesn't have to repeat the get_block call
                 */
                if (buffer_uptodate(map_bh)) {
                        map_buffer_to_page(page, map_bh, page_block);
                        goto confused;
                }

                if (first_hole != blocks_per_page)
                        goto confused;          /* hole -> non-hole */

                /* Contiguous blocks? */
                if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
                        goto confused;
                nblocks = map_bh->b_size >> blkbits;
                for (relative_block = 0; ; relative_block++) {
                        if (relative_block == nblocks) {
                                clear_buffer_mapped(map_bh);
                                break;
                        } else if (page_block == blocks_per_page)
                                break;
                        blocks[page_block] = map_bh->b_blocknr+relative_block;
                        page_block++;
                        block_in_file++;
                }
                bdev = map_bh->b_bdev;
        }

        if (first_hole != blocks_per_page) {
                zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto out;
                }
        } else if (fully_mapped) {
                SetPageMappedToDisk(page);
        }

        if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
            cleancache_get_page(page) == 0) {
                SetPageUptodate(page);
                goto confused;
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && (*last_block_in_bio != blocks[0] - 1))
                bio = mpage_bio_submit(READ, bio);

alloc_new:
        if (bio == NULL) {
                if (first_hole == blocks_per_page) {
                        if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
                                                                page))
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }

        length = first_hole << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(READ, bio);
                goto alloc_new;
        }

        relative_block = block_in_file - *first_logical_block;
        nblocks = map_bh->b_size >> blkbits;
        if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
            (first_hole != blocks_per_page))
                bio = mpage_bio_submit(READ, bio);
        else
                *last_block_in_bio = blocks[blocks_per_page - 1];
out:
        return bio;

confused:
        if (bio)
                bio = mpage_bio_submit(READ, bio);
        if (!PageUptodate(page))
                block_read_full_page(page, get_block);
        else
                unlock_page(page);
        goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *      12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                unsigned nr_pages, get_block_t get_block)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

        map_bh.b_state = 0;
        map_bh.b_size = 0;
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                        page->index,
                                        gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
                                        get_block, gfp);
                }
                page_cache_release(page);
        }
        BUG_ON(!list_empty(pages));
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpages);
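
/*
 * Illustrative sketch (not part of this file): a filesystem typically exposes
 * mpage_readpages() through its address_space_operations, passing its own
 * get_block_t so that do_mpage_readpage() can map file blocks to disk blocks.
 * The names example_get_block, example_readpage, example_readpages and
 * example_aops below are hypothetical; see ext2 for a real user.
 *
 *      static int example_readpages(struct file *file,
 *                      struct address_space *mapping,
 *                      struct list_head *pages, unsigned nr_pages)
 *      {
 *              return mpage_readpages(mapping, pages, nr_pages,
 *                                     example_get_block);
 *      }
 *
 *      static const struct address_space_operations example_aops = {
 *              .readpage       = example_readpage,
 *              .readpages      = example_readpages,
 *      };
 *
 * A get_block() which maps several blocks per call (by returning a large
 * bh->b_size) and which sets BH_Boundary where the next mapping needs a
 * metadata read lets this code build large BIOs and issue them in the order
 * described above.
 */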

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
        gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
                        &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpage);
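
/*
 * Illustrative sketch (hypothetical names): the single-page variant is
 * normally wired up as the ->readpage method of the same
 * address_space_operations, again passing the filesystem's block mapper:
 *
 *      static int example_readpage(struct file *file, struct page *page)
 *      {
 *              return mpage_readpage(page, example_get_block);
 *      }
 */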

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (BIO_MAX_PAGES) BIOs.
 */

struct mpage_data {
        struct bio *bio;
        sector_t last_block_in_bio;
        get_block_t *get_block;
        unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
        unsigned buffer_counter = 0;
        struct buffer_head *bh, *head;
        if (!page_has_buffers(page))
                return;
        head = page_buffers(page);
        bh = head;

        do {
                if (buffer_counter++ == first_unmapped)
                        break;
                clear_buffer_dirty(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * we cannot drop the bh if the page is not uptodate or a concurrent
         * readpage would fail to serialize with the bh and it would read from
         * disk before we reach the platter.
         */
        if (buffer_heads_over_limit && PageUptodate(page))
                try_to_free_buffers(page);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                      void *data)
{
        struct mpage_data *mpd = data;
        struct bio *bio = mpd->bio;
        struct address_space *mapping = page->mapping;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_unmapped = blocks_per_page;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
        struct block_device *boundary_bdev = NULL;
        int length;
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
        int wr = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);

        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                /* If they're all mapped and dirty, do it */
                page_block = 0;
                do {
                        BUG_ON(buffer_locked(bh));
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
                                 * __set_page_dirty_buffers -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
                                if (first_unmapped == blocks_per_page)
                                        first_unmapped = page_block;
                                continue;
                        }

                        if (first_unmapped != blocks_per_page)
                                goto confused;  /* hole -> non-hole */

                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                                goto confused;
                        if (page_block) {
                                if (bh->b_blocknr != blocks[page_block-1] + 1)
                                        goto confused;
                        }
                        blocks[page_block++] = bh->b_blocknr;
                        boundary = buffer_boundary(bh);
                        if (boundary) {
                                boundary_block = bh->b_blocknr;
                                boundary_bdev = bh->b_bdev;
                        }
                        bdev = bh->b_bdev;
                } while ((bh = bh->b_this_page) != head);

                if (first_unmapped)
                        goto page_is_mapped;

                /*
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
                 * using mpage_readpages then this can rarely happen.
                 */
                goto confused;
        }

        /*
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {

                map_bh.b_state = 0;
                map_bh.b_size = 1 << blkbits;
                if (mpd->get_block(inode, block_in_file, &map_bh, 1))
                        goto confused;
                if (buffer_new(&map_bh))
                        unmap_underlying_metadata(map_bh.b_bdev,
                                                map_bh.b_blocknr);
                if (buffer_boundary(&map_bh)) {
                        boundary_block = map_bh.b_blocknr;
                        boundary_bdev = map_bh.b_bdev;
                }
                if (page_block) {
                        if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                                goto confused;
                }
                blocks[page_block++] = map_bh.b_blocknr;
                boundary = buffer_boundary(&map_bh);
                bdev = map_bh.b_bdev;
                if (block_in_file == last_block)
                        break;
                block_in_file++;
        }
        BUG_ON(page_block == 0);

        first_unmapped = page_block;

page_is_mapped:
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining memory
                 * is zeroed when mapped, and writes to that region are not
                 * written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

                if (page->index > end_index || !offset)
                        goto confused;
                zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
                bio = mpage_bio_submit(wr, bio);

alloc_new:
        if (bio == NULL) {
                if (first_unmapped == blocks_per_page) {
                        if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
                                                                page, wbc)) {
                                clean_buffers(page, first_unmapped);
                                goto out;
                        }
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;

                wbc_init_bio(wbc, bio);
        }

        /*
         * Must try to add the page before marking the buffer clean or
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
        wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(wr, bio);
                goto alloc_new;
        }

        clean_buffers(page, first_unmapped);

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
                bio = mpage_bio_submit(wr, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
                }
        } else {
                mpd->last_block_in_bio = blocks[blocks_per_page - 1];
        }
        goto out;

confused:
        if (bio)
                bio = mpage_bio_submit(wr, bio);

        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
        } else {
                ret = -EAGAIN;
                goto out;
        }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
        mapping_set_error(mapping, ret);
out:
        mpd->bio = bio;
        return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block)
{
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);

        if (!get_block)
                ret = generic_writepages(mapping, wbc);
        else {
                struct mpage_data mpd = {
                        .bio = NULL,
                        .last_block_in_bio = 0,
                        .get_block = get_block,
                        .use_writepage = 1,
                };

                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
                if (mpd.bio) {
                        int wr = (wbc->sync_mode == WB_SYNC_ALL ?
                                  WRITE_SYNC : WRITE);
                        mpage_bio_submit(wr, mpd.bio);
                }
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(mpage_writepages);
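
/*
 * Illustrative sketch (hypothetical names): a filesystem that wants the
 * direct-to-BIO write path wires mpage_writepages() up as its ->writepages
 * method, passing its block mapper.  Passing a NULL get_block instead falls
 * back to generic_writepages(), i.e. to ->writepage.
 *
 *      static int example_writepages(struct address_space *mapping,
 *                      struct writeback_control *wbc)
 *      {
 *              return mpage_writepages(mapping, wbc, example_get_block);
 *      }
 */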

int mpage_writepage(struct page *page, get_block_t get_block,
        struct writeback_control *wbc)
{
        struct mpage_data mpd = {
                .bio = NULL,
                .last_block_in_bio = 0,
                .get_block = get_block,
                .use_writepage = 0,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
        if (mpd.bio) {
                int wr = (wbc->sync_mode == WB_SYNC_ALL ?
                          WRITE_SYNC : WRITE);
                mpage_bio_submit(wr, mpd.bio);
        }
        return ret;
}
EXPORT_SYMBOL(mpage_writepage);
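
/*
 * Illustrative sketch (hypothetical names): mpage_writepage() can serve as a
 * ->writepage implementation on its own.  Because use_writepage is 0 here,
 * a page which cannot be written as a single BIO gets -EAGAIN back from the
 * confused path rather than being handed to ->writepage again (which would
 * recurse).
 *
 *      static int example_writepage(struct page *page,
 *                      struct writeback_control *wbc)
 *      {
 *              return mpage_writepage(page, example_get_block, wbc);
 *      }
 */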