/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002    Andrew Morton
 *              Initial version
 * 27Jun2002    axboe@suse.de
 *              use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
}

static void mpage_end_io_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (!uptodate) {
                        SetPageError(page);
                        if (page->mapping)
                                set_bit(AS_EIO, &page->mapping->flags);
                }
                end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
}

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
        bio->bi_end_io = mpage_end_io_read;
        if (rw == WRITE)
                bio->bi_end_io = mpage_end_io_write;
        submit_bio(rw, bio);
        return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
                sector_t first_sector, int nr_vecs,
                gfp_t gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bh, *head;
        int block = 0;

        if (!page_has_buffers(page)) {
                /*
                 * don't make any buffers if there is only one buffer on
                 * the page and the page just needs to be set up to date
                 */
                if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
                    buffer_uptodate(bh)) {
                        SetPageUptodate(page);
                        return;
                }
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        }
        head = page_buffers(page);
        page_bh = head;
        do {
                if (block == page_block) {
                        page_bh->b_state = bh->b_state;
                        page_bh->b_bdev = bh->b_bdev;
                        page_bh->b_blocknr = bh->b_blocknr;
                        break;
                }
                page_bh = page_bh->b_this_page;
                block++;
        } while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs the largest possible BIOs, submitting them for IO
 * whenever the blocks stop being contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
                unsigned long *first_logical_block, get_block_t get_block)
{
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_hole = blocks_per_page;
        struct block_device *bdev = NULL;
        int length;
        int fully_mapped = 1;
        unsigned nblocks;
        unsigned relative_block;

        if (page_has_buffers(page))
                goto confused;

        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = block_in_file + nr_pages * blocks_per_page;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
                last_block = last_block_in_file;
        page_block = 0;

        /*
         * Map blocks using the result from the previous get_blocks call first.
         */
        nblocks = map_bh->b_size >> blkbits;
        if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
                        block_in_file < (*first_logical_block + nblocks)) {
                unsigned map_offset = block_in_file - *first_logical_block;
                unsigned last = nblocks - map_offset;

                for (relative_block = 0; ; relative_block++) {
                        if (relative_block == last) {
                                clear_buffer_mapped(map_bh);
                                break;
                        }
                        if (page_block == blocks_per_page)
                                break;
                        blocks[page_block] = map_bh->b_blocknr + map_offset +
                                                relative_block;
                        page_block++;
                        block_in_file++;
                }
                bdev = map_bh->b_bdev;
        }

        /*
         * Then do more get_blocks calls until we are done with this page.
         */
        map_bh->b_page = page;
        while (page_block < blocks_per_page) {
                map_bh->b_state = 0;
                map_bh->b_size = 0;

                if (block_in_file < last_block) {
                        map_bh->b_size = (last_block-block_in_file) << blkbits;
                        if (get_block(inode, block_in_file, map_bh, 0))
                                goto confused;
                        *first_logical_block = block_in_file;
                }

                if (!buffer_mapped(map_bh)) {
                        fully_mapped = 0;
                        if (first_hole == blocks_per_page)
                                first_hole = page_block;
                        page_block++;
                        block_in_file++;
                        continue;
                }

                /* some filesystems will copy data into the page during
                 * the get_block call, in which case we don't want to
                 * read it again.  map_buffer_to_page copies the data
                 * we just collected from get_block into the page's buffers
                 * so readpage doesn't have to repeat the get_block call
                 */
                if (buffer_uptodate(map_bh)) {
                        map_buffer_to_page(page, map_bh, page_block);
                        goto confused;
                }

                if (first_hole != blocks_per_page)
                        goto confused;          /* hole -> non-hole */

                /* Contiguous blocks? */
                if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
                        goto confused;
                nblocks = map_bh->b_size >> blkbits;
                for (relative_block = 0; ; relative_block++) {
                        if (relative_block == nblocks) {
                                clear_buffer_mapped(map_bh);
                                break;
                        } else if (page_block == blocks_per_page)
                                break;
                        blocks[page_block] = map_bh->b_blocknr+relative_block;
                        page_block++;
                        block_in_file++;
                }
                bdev = map_bh->b_bdev;
        }

        if (first_hole != blocks_per_page) {
                zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto out;
                }
        } else if (fully_mapped) {
                SetPageMappedToDisk(page);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && (*last_block_in_bio != blocks[0] - 1))
                bio = mpage_bio_submit(READ, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
                                GFP_KERNEL);
                if (bio == NULL)
                        goto confused;
        }

        length = first_hole << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(READ, bio);
                goto alloc_new;
        }

        relative_block = block_in_file - *first_logical_block;
        nblocks = map_bh->b_size >> blkbits;
        if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
            (first_hole != blocks_per_page))
                bio = mpage_bio_submit(READ, bio);
        else
                *last_block_in_bio = blocks[blocks_per_page - 1];
out:
        return bio;

confused:
        if (bio)
                bio = mpage_bio_submit(READ, bio);
        if (!PageUptodate(page))
                block_read_full_page(page, get_block);
        else
                unlock_page(page);
        goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *      12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                unsigned nr_pages, get_block_t get_block)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;

        map_bh.b_state = 0;
        map_bh.b_size = 0;
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
                                        get_block);
                }
                page_cache_release(page);
        }
        BUG_ON(!list_empty(pages));
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpages);
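
/*
 * Example (illustrative sketch only, not part of this file): a filesystem
 * typically wires mpage_readpages() into its ->readpages() aop by passing
 * its own get_block routine.  "myfs_readpages" and "myfs_get_block" are
 * hypothetical names.  The ordering benefit described above comes from the
 * filesystem calling set_buffer_boundary() inside its get_block when the
 * next mapping would require a metadata read.
 *
 *	static int myfs_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					myfs_get_block);
 *	}
 */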

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;

        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
                        &map_bh, &first_logical_block, get_block);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpage);
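
/*
 * Example (illustrative sketch only): the matching single-page ->readpage()
 * aop is usually a one-liner around mpage_readpage().  "myfs_readpage" and
 * "myfs_get_block" are hypothetical names.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, myfs_get_block);
 *	}
 */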

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
        struct bio *bio;
        sector_t last_block_in_bio;
        get_block_t *get_block;
        unsigned use_writepage;
};

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                      void *data)
{
        struct mpage_data *mpd = data;
        struct bio *bio = mpd->bio;
        struct address_space *mapping = page->mapping;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_unmapped = blocks_per_page;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
        struct block_device *boundary_bdev = NULL;
        int length;
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;

        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                /* If they're all mapped and dirty, do it */
                page_block = 0;
                do {
                        BUG_ON(buffer_locked(bh));
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
                                 * __set_page_dirty_buffers -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
                                if (first_unmapped == blocks_per_page)
                                        first_unmapped = page_block;
                                continue;
                        }

                        if (first_unmapped != blocks_per_page)
                                goto confused;  /* hole -> non-hole */

                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                                goto confused;
                        if (page_block) {
                                if (bh->b_blocknr != blocks[page_block-1] + 1)
                                        goto confused;
                        }
                        blocks[page_block++] = bh->b_blocknr;
                        boundary = buffer_boundary(bh);
                        if (boundary) {
                                boundary_block = bh->b_blocknr;
                                boundary_bdev = bh->b_bdev;
                        }
                        bdev = bh->b_bdev;
                } while ((bh = bh->b_this_page) != head);

                if (first_unmapped)
                        goto page_is_mapped;

                /*
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
                 * using mpage_readpages then this can rarely happen.
                 */
                goto confused;
        }

        /*
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {

                map_bh.b_state = 0;
                map_bh.b_size = 1 << blkbits;
                if (mpd->get_block(inode, block_in_file, &map_bh, 1))
                        goto confused;
                if (buffer_new(&map_bh))
                        unmap_underlying_metadata(map_bh.b_bdev,
                                                map_bh.b_blocknr);
                if (buffer_boundary(&map_bh)) {
                        boundary_block = map_bh.b_blocknr;
                        boundary_bdev = map_bh.b_bdev;
                }
                if (page_block) {
                        if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                                goto confused;
                }
                blocks[page_block++] = map_bh.b_blocknr;
                boundary = buffer_boundary(&map_bh);
                bdev = map_bh.b_bdev;
                if (block_in_file == last_block)
                        break;
                block_in_file++;
        }
        BUG_ON(page_block == 0);

        first_unmapped = page_block;

page_is_mapped:
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining memory
                 * is zeroed when mapped, and writes to that region are not
                 * written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

                if (page->index > end_index || !offset)
                        goto confused;
                zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
                bio = mpage_bio_submit(WRITE, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;
        }

        /*
         * Must try to add the page before marking the buffer clean or
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(WRITE, bio);
                goto alloc_new;
        }

        /*
         * OK, we have our BIO, so we can now mark the buffers clean.  Make
         * sure to only clean buffers which we know we'll be writing.
         */
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;
                unsigned buffer_counter = 0;

                do {
                        if (buffer_counter++ == first_unmapped)
                                break;
                        clear_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);

                /*
                 * we cannot drop the bh if the page is not uptodate
                 * or a concurrent readpage would fail to serialize with the bh
                 * and it would read from disk before we reach the platter.
                 */
                if (buffer_heads_over_limit && PageUptodate(page))
                        try_to_free_buffers(page);
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
                bio = mpage_bio_submit(WRITE, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
                }
        } else {
                mpd->last_block_in_bio = blocks[blocks_per_page - 1];
        }
        goto out;

confused:
        if (bio)
                bio = mpage_bio_submit(WRITE, bio);

        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
        } else {
                ret = -EAGAIN;
                goto out;
        }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
        mapping_set_error(mapping, ret);
out:
        mpd->bio = bio;
        return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block)
{
        int ret;

        if (!get_block)
                ret = generic_writepages(mapping, wbc);
        else {
                struct mpage_data mpd = {
                        .bio = NULL,
                        .last_block_in_bio = 0,
                        .get_block = get_block,
                        .use_writepage = 1,
                };

                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
                if (mpd.bio)
                        mpage_bio_submit(WRITE, mpd.bio);
        }
        return ret;
}
EXPORT_SYMBOL(mpage_writepages);
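
/*
 * Example (illustrative sketch only): a filesystem's ->writepages() aop can
 * simply hand its get_block routine to mpage_writepages().  "myfs_writepages"
 * and "myfs_get_block" are hypothetical names.
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, myfs_get_block);
 *	}
 */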

int mpage_writepage(struct page *page, get_block_t get_block,
        struct writeback_control *wbc)
{
        struct mpage_data mpd = {
                .bio = NULL,
                .last_block_in_bio = 0,
                .get_block = get_block,
                .use_writepage = 0,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
        if (mpd.bio)
                mpage_bio_submit(WRITE, mpd.bio);
        return ret;
}
EXPORT_SYMBOL(mpage_writepage);

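/*
 * Example (illustrative sketch only): mpage_writepage() backs a single-page
 * ->writepage() aop.  Because use_writepage is 0 here, a page that cannot be
 * mapped contiguously makes __mpage_writepage() return -EAGAIN rather than
 * recursing into ->writepage().  "myfs_writepage" and "myfs_get_block" are
 * hypothetical names.
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, myfs_get_block, wbc);
 *	}
 */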