/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/mm_inline.h> /* for page_is_file_cache() */
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_file_buffered_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  inode_wb_list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    inode_wb_list_lock	(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    inode_wb_list_lock	(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  (code doesn't rely on that order, so you could switch it around)
 *  ->tasklist_lock		(memory_failure, collect_procs_ao)
 *    ->i_mmap_lock
 */

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * as the caller still holds a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
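
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants to evict a known offset might pair this with find_lock_page(),
 * which returns the page locked with an elevated refcount:
 *
 *	struct page *page = find_lock_page(mapping, index);
 *	if (page) {
 *		delete_from_page_cache(page);
 *		unlock_page(page);
 *		page_cache_release(page);	(drop find_lock_page's ref)
 *	}
 */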

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		return 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
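
/*
 * Illustrative sketch (not part of the original source): an fsync-style
 * helper would typically kick off writeback and then wait on the same
 * byte range, e.g. for a range [pos, pos + len):
 *
 *	err = filemap_fdatawrite_range(mapping, pos, pos + len - 1);
 *	if (err == 0)
 *		err = filemap_fdatawait_range(mapping, pos, pos + len - 1);
 */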

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may have
		 * been partially written (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
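
/*
 * Illustrative sketch (not part of the original source): this is the
 * usual preamble for a direct I/O read, flushing dirty pagecache for
 * the affected bytes so the device and the cache agree (compare the
 * O_DIRECT branch of generic_file_aio_read() below):
 *
 *	err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
 *	if (err)
 *		return err;
 *	(now safe to issue direct I/O against [pos, pos + count))
 */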

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU; the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;
	struct mem_cgroup *memcg = NULL;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	/*
	 * This is not page migration, but prepare_migration and
	 * end_migration do enough work for charge replacement.
	 *
	 * In the longer term we probably want a specialized function
	 * for moving the charge from old to new in a more efficient
	 * manner.
	 */
	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
	if (error)
		return error;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
		mem_cgroup_end_migration(memcg, old, new, true);
	} else {
		mem_cgroup_end_migration(memcg, old, new, false);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (likely(!error)) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			if (PageSwapBacked(page))
				__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			mem_cgroup_uncharge_cache_page(page);
			page_cache_release(page);
		}
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	/*
	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
	 * before shmem_readpage has a chance to mark them as SwapBacked: they
	 * need to go on the anon lru below, and mem_cgroup_cache_charge
	 * (called in add_to_page_cache) needs to know where they're going too.
	 */
	if (mapping_cap_swap_backed(mapping))
		SetPageSwapBacked(page);

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0) {
		if (page_is_file_cache(page))
			lru_cache_add_file(page);
		else
			lru_cache_add_anon(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
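
/*
 * Illustrative sketch (not part of the original source): the common
 * populate-the-cache pattern is allocate, insert, read, treating a lost
 * insertion race as success (compare page_cache_read() below):
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err == 0)
 *		err = mapping->a_ops->readpage(file, page);
 *	else if (err == -EEXIST)
 *		err = 0;	(someone else added it first)
 *	page_cache_release(page);
 */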

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		get_mems_allowed();
		n = cpuset_mem_spread_node();
		page = alloc_pages_exact_node(n, gfp, 0);
		put_mems_allowed();
		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
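
/*
 * Illustrative sketch (not part of the original source): the canonical
 * lock/unlock pairing rechecks page->mapping after the lock is taken,
 * because the page may have been truncated while we slept:
 *
 *	lock_page(page);
 *	if (page->mapping != mapping) {
 *		unlock_page(page);
 *		page_cache_release(page);
 *		(truncated - retry the lookup)
 *	}
 *	(... operate on the locked page ...)
 *	unlock_page(page);
 */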

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
		__lock_page(page);
		return 1;
	} else {
		if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
			up_read(&mm->mmap_sem);
			wait_on_page_locked(page);
		}
		return 0;
	}
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_deref_retry(page))
			goto repeat;

		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
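
/*
 * Illustrative sketch (not part of the original source): a read-side
 * caller must balance the reference that find_get_page() takes:
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		if (PageUptodate(page))
 *			(... copy data out of the page ...)
 *		page_cache_release(page);
 *	}
 */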

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns %NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or %NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
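
/*
 * Illustrative sketch (not part of the original source): a filesystem
 * grabbing (or creating) a metadata page gets it back locked and with
 * an elevated refcount, so both must be dropped when done:
 *
 *	struct page *page = find_or_create_page(mapping, index,
 *						mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	(... initialize or read the page while it is locked ...)
 *	unlock_page(page);
 *	page_cache_release(page);
 */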

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page)) {
			WARN_ON(start | i);
			goto restart;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, index, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page))
			goto restart;

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		ret++;
		index++;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
				(void ***)pages, *index, nr_pages, tag);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page))
			goto restart;

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
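
/*
 * Illustrative sketch (not part of the original source): a writeback
 * loop typically consumes this through pagevec_lookup_tag(), walking a
 * mapping's dirty pages a pagevec at a time (compare
 * filemap_fdatawait_range() above, which does the same for writeback):
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *	unsigned nr;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *			PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
 *		unsigned i;
 *		for (i = 0; i < nr; i++)
 *			(... write out pvec.pages[i] ...)
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */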

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
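
/*
 * Illustrative sketch (not part of the original source): a speculative
 * data generator can tolerate failure here and simply try again later:
 *
 *	struct page *page = grab_cache_page_nowait(mapping, index);
 *	if (!page)
 *		return;		(busy or out of memory - regenerate later)
 *	(... fill in the page ...)
 *	unlock_page(page);
 *	page_cache_release(page);
 */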

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used.
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns an appropriate error code that the
 * caller should return, or zero if the write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long	seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
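
/*
 * Illustrative sketch (not part of the original source): a read path
 * validates its iovec before touching the page cache, and may come back
 * with nr_segs trimmed if a later segment fails access_ok() (compare
 * the start of generic_file_aio_read() below):
 *
 *	size_t count;
 *	int err = generic_segment_checks(iov, &nr_segs, &count,
 *					 VERIFY_WRITE);
 *	if (err)
 *		return err;
 *	(proceed, transferring at most "count" bytes)
 */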

/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg = 0;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;
	struct blk_plug plug;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	blk_start_plug(&plug);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
			if (!retval) {
				retval = mapping->a_ops->direct_IO(READ, iocb,
							iov, pos, nr_segs);
			}
			if (retval > 0) {
				*ppos = pos + retval;
				count -= retval;
			}

			/*
			 * Btrfs can have a short DIO read if we encounter
			 * compressed extents, so if there was an error, or if
			 * we've already read everything we wanted to, or if
			 * there was a short read because we hit EOF, go ahead
			 * and return.  Otherwise fallthrough to buffered io for
			 * the rest of the read.
			 */
			if (retval < 0 || !count || *ppos >= size) {
				file_accessed(filp);
				goto out;
			}
		}
	}

	count = retval;
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;
		loff_t offset = 0;

		/*
		 * If we did a short DIO read we need to skip the section of the
		 * iov that we've already read data into.
		 */
		if (count) {
			if (count > iov[seg].iov_len) {
				count -= iov[seg].iov_len;
				continue;
			}
			offset = count;
			count = 0;
		}

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base + offset;
		desc.count = iov[seg].iov_len - offset;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
out:
	blk_finish_plug(&plug);
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);
1442
1443static ssize_t
1444do_readahead(struct address_space *mapping, struct file *filp,
1445             pgoff_t index, unsigned long nr)
1446{
1447        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1448                return -EINVAL;
1449
1450        force_page_cache_readahead(mapping, filp, index, nr);
1451        return 0;
1452}
1453
1454SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1455{
1456        ssize_t ret;
1457        struct file *file;
1458
1459        ret = -EBADF;
1460        file = fget(fd);
1461        if (file) {
1462                if (file->f_mode & FMODE_READ) {
1463                        struct address_space *mapping = file->f_mapping;
1464                        pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1465                        pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1466                        unsigned long len = end - start + 1;
1467                        ret = do_readahead(mapping, file, start, len);
1468                }
1469                fput(file);
1470        }
1471        return ret;
1472}
1473#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1474asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1475{
1476        return SYSC_readahead((int) fd, offset, (size_t) count);
1477}
1478SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1479#endif
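
/*
 * Usage sketch (userspace, not part of this file): the readahead()
 * system call defined above lets an application prefetch, say, the
 * first megabyte of a file before scanning it sequentially; the file
 * name is illustrative only:
 *
 *        int fd = open("data.bin", O_RDONLY);
 *        if (fd >= 0)
 *                readahead(fd, 0, 1 << 20);
 */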
1480
1481#ifdef CONFIG_MMU
1482/**
1483 * page_cache_read - adds requested page to the page cache if not already there
1484 * @file:       file to read
1485 * @offset:     page index
1486 *
1487 * This adds the requested page to the page cache if it isn't already there,
1488 * and schedules an I/O to read in its contents from disk.
1489 */
1490static int page_cache_read(struct file *file, pgoff_t offset)
1491{
1492        struct address_space *mapping = file->f_mapping;
1493        struct page *page;
1494        int ret;
1495
1496        do {
1497                page = page_cache_alloc_cold(mapping);
1498                if (!page)
1499                        return -ENOMEM;
1500
1501                ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1502                if (ret == 0)
1503                        ret = mapping->a_ops->readpage(file, page);
1504                else if (ret == -EEXIST)
1505                        ret = 0; /* losing race to add is OK */
1506
1507                page_cache_release(page);
1508
1509        } while (ret == AOP_TRUNCATED_PAGE);
1510
1511        return ret;
1512}
1513
1514#define MMAP_LOTSAMISS  (100)
1515
1516/*
1517 * Synchronous readahead happens when we don't even find
1518 * a page in the page cache at all.
1519 */
1520static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1521                                   struct file_ra_state *ra,
1522                                   struct file *file,
1523                                   pgoff_t offset)
1524{
1525        unsigned long ra_pages;
1526        struct address_space *mapping = file->f_mapping;
1527
1528        /* If we don't want any read-ahead, don't bother */
1529        if (VM_RandomReadHint(vma))
1530                return;
1531
1532        if (VM_SequentialReadHint(vma) ||
1533                        offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
1534                page_cache_sync_readahead(mapping, ra, file, offset,
1535                                          ra->ra_pages);
1536                return;
1537        }
1538
1539        if (ra->mmap_miss < INT_MAX)
1540                ra->mmap_miss++;
1541
1542        /*
1543         * Do we miss much more than hit in this file? If so,
1544         * stop bothering with read-ahead. It will only hurt.
1545         */
1546        if (ra->mmap_miss > MMAP_LOTSAMISS)
1547                return;
1548
1549        /*
1550         * mmap read-around
1551         */
1552        ra_pages = max_sane_readahead(ra->ra_pages);
1553        if (ra_pages) {
1554                ra->start = max_t(long, 0, offset - ra_pages/2);
1555                ra->size = ra_pages;
1556                ra->async_size = 0;
1557                ra_submit(ra, mapping, file);
1558        }
1559}
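
/*
 * Worked example: assuming ra->ra_pages == 32, a fault at page offset
 * 100 with no read hints and few prior misses sets
 * ra->start = 100 - 32/2 = 84 and ra->size = 32, so ra_submit() reads
 * around the fault, covering pages 84..115.
 */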
1560
1561/*
1562 * Asynchronous readahead happens when we find the page with PG_readahead
1563 * set, so we may want to extend the readahead further.
1564 */
1565static void do_async_mmap_readahead(struct vm_area_struct *vma,
1566                                    struct file_ra_state *ra,
1567                                    struct file *file,
1568                                    struct page *page,
1569                                    pgoff_t offset)
1570{
1571        struct address_space *mapping = file->f_mapping;
1572
1573        /* If we don't want any read-ahead, don't bother */
1574        if (VM_RandomReadHint(vma))
1575                return;
1576        if (ra->mmap_miss > 0)
1577                ra->mmap_miss--;
1578        if (PageReadahead(page))
1579                page_cache_async_readahead(mapping, ra, file,
1580                                           page, offset, ra->ra_pages);
1581}
1582
1583/**
1584 * filemap_fault - read in file data for page fault handling
1585 * @vma:        vma in which the fault was taken
1586 * @vmf:        struct vm_fault containing details of the fault
1587 *
1588 * filemap_fault() is invoked via the vma operations vector for a
1589 * mapped memory region to read in file data during a page fault.
1590 *
1591 * The gotos are kind of ugly, but this streamlines the normal case of having
1592 * it in the page cache, and handles the special cases reasonably without
1593 * having a lot of duplicated code.
1594 */
1595int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1596{
1597        int error;
1598        struct file *file = vma->vm_file;
1599        struct address_space *mapping = file->f_mapping;
1600        struct file_ra_state *ra = &file->f_ra;
1601        struct inode *inode = mapping->host;
1602        pgoff_t offset = vmf->pgoff;
1603        struct page *page;
1604        pgoff_t size;
1605        int ret = 0;
1606
1607        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1608        if (offset >= size)
1609                return VM_FAULT_SIGBUS;
1610
1611        /*
1612         * Do we have something in the page cache already?
1613         */
1614        page = find_get_page(mapping, offset);
1615        if (likely(page)) {
1616                /*
1617                 * We found the page, so try async readahead before
1618                 * waiting for the lock.
1619                 */
1620                do_async_mmap_readahead(vma, ra, file, page, offset);
1621        } else {
1622                /* No page in the page cache at all */
1623                do_sync_mmap_readahead(vma, ra, file, offset);
1624                count_vm_event(PGMAJFAULT);
1625                ret = VM_FAULT_MAJOR;
1626retry_find:
1627                page = find_get_page(mapping, offset);
1628                if (!page)
1629                        goto no_cached_page;
1630        }
1631
1632        if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1633                page_cache_release(page);
1634                return ret | VM_FAULT_RETRY;
1635        }
1636
1637        /* Did it get truncated? */
1638        if (unlikely(page->mapping != mapping)) {
1639                unlock_page(page);
1640                put_page(page);
1641                goto retry_find;
1642        }
1643        VM_BUG_ON(page->index != offset);
1644
1645        /*
1646         * We have a locked page in the page cache, now we need to check
1647         * that it's up-to-date. If not, it is going to be due to an error.
1648         */
1649        if (unlikely(!PageUptodate(page)))
1650                goto page_not_uptodate;
1651
1652        /*
1653         * Found the page and have a reference on it.
1654         * We must recheck i_size under page lock.
1655         */
1656        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1657        if (unlikely(offset >= size)) {
1658                unlock_page(page);
1659                page_cache_release(page);
1660                return VM_FAULT_SIGBUS;
1661        }
1662
1663        ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
1664        vmf->page = page;
1665        return ret | VM_FAULT_LOCKED;
1666
1667no_cached_page:
1668        /*
1669         * We're only likely to ever get here if MADV_RANDOM is in
1670         * effect.
1671         */
1672        error = page_cache_read(file, offset);
1673
1674        /*
1675         * The page we want has now been added to the page cache.
1676         * In the unlikely event that someone removed it in the
1677         * meantime, we'll just come back here and read it again.
1678         */
1679        if (error >= 0)
1680                goto retry_find;
1681
1682        /*
1683         * An error return from page_cache_read can result if the
1684         * system is low on memory, or a problem occurs while trying
1685         * to schedule I/O.
1686         */
1687        if (error == -ENOMEM)
1688                return VM_FAULT_OOM;
1689        return VM_FAULT_SIGBUS;
1690
1691page_not_uptodate:
1692        /*
1693         * Take care of errors if the page isn't up-to-date.
1694         * Try to re-read it _once_. We do this synchronously,
1695         * because there really aren't any performance issues here
1696         * and we need to check for errors.
1697         */
1698        ClearPageError(page);
1699        error = mapping->a_ops->readpage(file, page);
1700        if (!error) {
1701                wait_on_page_locked(page);
1702                if (!PageUptodate(page))
1703                        error = -EIO;
1704        }
1705        page_cache_release(page);
1706
1707        if (!error || error == AOP_TRUNCATED_PAGE)
1708                goto retry_find;
1709
1710        /* Things didn't work out. Return SIGBUS to tell the mm layer so. */
1711        shrink_readahead_size_eio(file, ra);
1712        return VM_FAULT_SIGBUS;
1713}
1714EXPORT_SYMBOL(filemap_fault);
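
/*
 * Usage sketch: filesystems that need their own ->page_mkwrite() can
 * still reuse filemap_fault() for the fault side (illustrative names,
 * after the pattern used by ext4):
 *
 *        static const struct vm_operations_struct foo_file_vm_ops = {
 *                .fault          = filemap_fault,
 *                .page_mkwrite   = foo_page_mkwrite,
 *        };
 */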
1715
1716const struct vm_operations_struct generic_file_vm_ops = {
1717        .fault          = filemap_fault,
1718};
1719
1720/* This is used for a general mmap of a disk file */
1721
1722int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
1723{
1724        struct address_space *mapping = file->f_mapping;
1725
1726        if (!mapping->a_ops->readpage)
1727                return -ENOEXEC;
1728        file_accessed(file);
1729        vma->vm_ops = &generic_file_vm_ops;
1730        vma->vm_flags |= VM_CAN_NONLINEAR;
1731        return 0;
1732}
1733
1734/*
1735 * This is for filesystems which do not implement ->writepage.
1736 */
1737int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1738{
1739        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1740                return -EINVAL;
1741        return generic_file_mmap(file, vma);
1742}
1743#else
1744int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
1745{
1746        return -ENOSYS;
1747}
1748int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1749{
1750        return -ENOSYS;
1751}
1752#endif /* CONFIG_MMU */
1753
1754EXPORT_SYMBOL(generic_file_mmap);
1755EXPORT_SYMBOL(generic_file_readonly_mmap);
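
/*
 * Usage sketch: a filesystem normally wires these helpers into its
 * struct file_operations; "foo_file_operations" is illustrative:
 *
 *        const struct file_operations foo_file_operations = {
 *                .llseek         = generic_file_llseek,
 *                .read           = do_sync_read,
 *                .aio_read       = generic_file_aio_read,
 *                .mmap           = generic_file_mmap,
 *        };
 */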
1756
1757static struct page *__read_cache_page(struct address_space *mapping,
1758                                pgoff_t index,
1759                                int (*filler)(void *, struct page *),
1760                                void *data,
1761                                gfp_t gfp)
1762{
1763        struct page *page;
1764        int err;
1765repeat:
1766        page = find_get_page(mapping, index);
1767        if (!page) {
1768                page = __page_cache_alloc(gfp | __GFP_COLD);
1769                if (!page)
1770                        return ERR_PTR(-ENOMEM);
1771                err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1772                if (unlikely(err)) {
1773                        page_cache_release(page);
1774                        if (err == -EEXIST)
1775                                goto repeat;
1776                        /* Presumably ENOMEM for radix tree node */
1777                        return ERR_PTR(err);
1778                }
1779                err = filler(data, page);
1780                if (err < 0) {
1781                        page_cache_release(page);
1782                        page = ERR_PTR(err);
1783                }
1784        }
1785        return page;
1786}
1787
1788static struct page *do_read_cache_page(struct address_space *mapping,
1789                                pgoff_t index,
1790                                int (*filler)(void *, struct page *),
1791                                void *data,
1792                                gfp_t gfp)
1793
1794{
1795        struct page *page;
1796        int err;
1797
1798retry:
1799        page = __read_cache_page(mapping, index, filler, data, gfp);
1800        if (IS_ERR(page))
1801                return page;
1802        if (PageUptodate(page))
1803                goto out;
1804
1805        lock_page(page);
1806        if (!page->mapping) {
1807                unlock_page(page);
1808                page_cache_release(page);
1809                goto retry;
1810        }
1811        if (PageUptodate(page)) {
1812                unlock_page(page);
1813                goto out;
1814        }
1815        err = filler(data, page);
1816        if (err < 0) {
1817                page_cache_release(page);
1818                return ERR_PTR(err);
1819        }
1820out:
1821        mark_page_accessed(page);
1822        return page;
1823}
1824
1825/**
1826 * read_cache_page_async - read into page cache, fill it if needed
1827 * @mapping:    the page's address_space
1828 * @index:      the page index
1829 * @filler:     function to perform the read
1830 * @data:       first argument passed to @filler (often NULL)
1831 *
1832 * Same as read_cache_page, but don't wait for the page to become unlocked
1833 * after submitting it to the filler.
1834 *
1835 * Read into the page cache. If a page already exists, and PageUptodate() is
1836 * not set, try to fill the page but don't wait for it to become unlocked.
1837 *
1838 * If the page does not get brought uptodate, return -EIO.
1839 */
1840struct page *read_cache_page_async(struct address_space *mapping,
1841                                pgoff_t index,
1842                                int (*filler)(void *, struct page *),
1843                                void *data)
1844{
1845        return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1846}
1847EXPORT_SYMBOL(read_cache_page_async);
1848
1849static struct page *wait_on_page_read(struct page *page)
1850{
1851        if (!IS_ERR(page)) {
1852                wait_on_page_locked(page);
1853                if (!PageUptodate(page)) {
1854                        page_cache_release(page);
1855                        page = ERR_PTR(-EIO);
1856                }
1857        }
1858        return page;
1859}
1860
1861/**
1862 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1863 * @mapping:    the page's address_space
1864 * @index:      the page index
1865 * @gfp:        the page allocator flags to use if allocating
1866 *
1867 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1868 * any new page allocations done using the specified allocation flags. Note
1869 * that the radix tree operations will still use GFP_KERNEL, so you can't
1870 * expect to do this atomically or anything like that - but you can pass in
1871 * other page requirements.
1872 *
1873 * If the page does not get brought uptodate, return -EIO.
1874 */
1875struct page *read_cache_page_gfp(struct address_space *mapping,
1876                                pgoff_t index,
1877                                gfp_t gfp)
1878{
1879        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1880
1881        return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1882}
1883EXPORT_SYMBOL(read_cache_page_gfp);
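
/*
 * Usage sketch: a caller that must not recurse into filesystem reclaim
 * while reading metadata can pass a restricted mask (illustrative):
 *
 *        page = read_cache_page_gfp(mapping, index, GFP_NOFS);
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 */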
1884
1885/**
1886 * read_cache_page - read into page cache, fill it if needed
1887 * @mapping:    the page's address_space
1888 * @index:      the page index
1889 * @filler:     function to perform the read
1890 * @data:       first argument passed to @filler (often NULL)
1891 *
1892 * Read into the page cache. If a page already exists, and PageUptodate() is
1893 * not set, try to fill the page then wait for it to become unlocked.
1894 *
1895 * If the page does not get brought uptodate, return -EIO.
1896 */
1897struct page *read_cache_page(struct address_space *mapping,
1898                                pgoff_t index,
1899                                int (*filler)(void *, struct page *),
1900                                void *data)
1901{
1902        return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1903}
1904EXPORT_SYMBOL(read_cache_page);
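
/*
 * Usage sketch: the common case passes the mapping's own ->readpage()
 * as the filler, with the file (or NULL) as the filler data, much as
 * read_mapping_page() in <linux/pagemap.h> does:
 *
 *        page = read_cache_page(mapping, index,
 *                        (filler_t *)mapping->a_ops->readpage, file);
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 */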
1905
1906/*
1907 * The logic we want is
1908 *
1909 *      if suid or (sgid and xgrp)
1910 *              remove privs
1911 */
1912int should_remove_suid(struct dentry *dentry)
1913{
1914        mode_t mode = dentry->d_inode->i_mode;
1915        int kill = 0;
1916
1917        /* suid always must be killed */
1918        if (unlikely(mode & S_ISUID))
1919                kill = ATTR_KILL_SUID;
1920
1921        /*
1922         * sgid without any exec bits is just a mandatory locking mark; leave
1923         * it alone.  If some exec bits are set, it's a real sgid; kill it.
1924         */
1925        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1926                kill |= ATTR_KILL_SGID;
1927
1928        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1929                return kill;
1930
1931        return 0;
1932}
1933EXPORT_SYMBOL(should_remove_suid);
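
/*
 * Worked example: for a regular file with mode 06775 written by a task
 * without CAP_FSETID, both tests above fire and this returns
 * ATTR_KILL_SUID | ATTR_KILL_SGID; a CAP_FSETID writer gets 0.
 */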
1934
1935static int __remove_suid(struct dentry *dentry, int kill)
1936{
1937        struct iattr newattrs;
1938
1939        newattrs.ia_valid = ATTR_FORCE | kill;
1940        return notify_change(dentry, &newattrs);
1941}
1942
1943int file_remove_suid(struct file *file)
1944{
1945        struct dentry *dentry = file->f_path.dentry;
1946        int killsuid = should_remove_suid(dentry);
1947        int killpriv = security_inode_need_killpriv(dentry);
1948        int error = 0;
1949
1950        if (killpriv < 0)
1951                return killpriv;
1952        if (killpriv)
1953                error = security_inode_killpriv(dentry);
1954        if (!error && killsuid)
1955                error = __remove_suid(dentry, killsuid);
1956
1957        return error;
1958}
1959EXPORT_SYMBOL(file_remove_suid);
1960
1961static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1962                        const struct iovec *iov, size_t base, size_t bytes)
1963{
1964        size_t copied = 0, left = 0;
1965
1966        while (bytes) {
1967                char __user *buf = iov->iov_base + base;
1968                int copy = min(bytes, iov->iov_len - base);
1969
1970                base = 0;
1971                left = __copy_from_user_inatomic(vaddr, buf, copy);
1972                copied += copy;
1973                bytes -= copy;
1974                vaddr += copy;
1975                iov++;
1976
1977                if (unlikely(left))
1978                        break;
1979        }
1980        return copied - left;
1981}
1982
1983/*
1984 * Copy as much as we can into the page and return the number of bytes which
1985 * were successfully copied.  If a fault is encountered part-way through,
1986 * return the number of bytes which were copied before the fault.
1987 */
1988size_t iov_iter_copy_from_user_atomic(struct page *page,
1989                struct iov_iter *i, unsigned long offset, size_t bytes)
1990{
1991        char *kaddr;
1992        size_t copied;
1993
1994        BUG_ON(!in_atomic());
1995        kaddr = kmap_atomic(page, KM_USER0);
1996        if (likely(i->nr_segs == 1)) {
1997                int left;
1998                char __user *buf = i->iov->iov_base + i->iov_offset;
1999                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2000                copied = bytes - left;
2001        } else {
2002                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2003                                                i->iov, i->iov_offset, bytes);
2004        }
2005        kunmap_atomic(kaddr, KM_USER0);
2006
2007        return copied;
2008}
2009EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2010
2011/*
2012 * This has the same side effects and return value as
2013 * iov_iter_copy_from_user_atomic().
2014 * The difference is that it attempts to resolve faults.
2015 * Page must not be locked.
2016 */
2017size_t iov_iter_copy_from_user(struct page *page,
2018                struct iov_iter *i, unsigned long offset, size_t bytes)
2019{
2020        char *kaddr;
2021        size_t copied;
2022
2023        kaddr = kmap(page);
2024        if (likely(i->nr_segs == 1)) {
2025                int left;
2026                char __user *buf = i->iov->iov_base + i->iov_offset;
2027                left = __copy_from_user(kaddr + offset, buf, bytes);
2028                copied = bytes - left;
2029        } else {
2030                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2031                                                i->iov, i->iov_offset, bytes);
2032        }
2033        kunmap(page);
2034        return copied;
2035}
2036EXPORT_SYMBOL(iov_iter_copy_from_user);
2037
2038void iov_iter_advance(struct iov_iter *i, size_t bytes)
2039{
2040        BUG_ON(i->count < bytes);
2041
2042        if (likely(i->nr_segs == 1)) {
2043                i->iov_offset += bytes;
2044                i->count -= bytes;
2045        } else {
2046                const struct iovec *iov = i->iov;
2047                size_t base = i->iov_offset;
2048
2049                /*
2050                 * The !iov->iov_len check ensures we skip over unlikely
2051                 * zero-length segments (without overrunning the iovec).
2052                 */
2053                while (bytes || unlikely(i->count && !iov->iov_len)) {
2054                        int copy;
2055
2056                        copy = min(bytes, iov->iov_len - base);
2057                        BUG_ON(!i->count || i->count < copy);
2058                        i->count -= copy;
2059                        bytes -= copy;
2060                        base += copy;
2061                        if (iov->iov_len == base) {
2062                                iov++;
2063                                base = 0;
2064                        }
2065                }
2066                i->iov = iov;
2067                i->iov_offset = base;
2068        }
2069}
2070EXPORT_SYMBOL(iov_iter_advance);
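
/*
 * Worked example: with two iovecs of lengths 3 and 5, iov_offset == 2
 * and bytes == 4, the loop above consumes the last byte of the first
 * iovec, steps to the second, and leaves iov_offset == 3 within it
 * (with i->count reduced by 4 as usual).
 */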
2071
2072/*
2073 * Fault in the first iovec of the given iov_iter, to a maximum length
2074 * of bytes. Returns 0 on success, or non-zero if the memory could not be
2075 * accessed (i.e. because it is an invalid address).
2076 *
2077 * writev-intensive code may want this to prefault several iovecs -- that
2078 * would be possible.  Callers must not rely on _only_ the first iovec
2079 * being faulted with the current implementation.
2080 */
2081int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2082{
2083        char __user *buf = i->iov->iov_base + i->iov_offset;
2084        bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2085        return fault_in_pages_readable(buf, bytes);
2086}
2087EXPORT_SYMBOL(iov_iter_fault_in_readable);
2088
2089/*
2090 * Return the count of just the current iov_iter segment.
2091 */
2092size_t iov_iter_single_seg_count(struct iov_iter *i)
2093{
2094        const struct iovec *iov = i->iov;
2095        if (i->nr_segs == 1)
2096                return i->count;
2097        else
2098                return min(i->count, iov->iov_len - i->iov_offset);
2099}
2100EXPORT_SYMBOL(iov_iter_single_seg_count);
2101
2102/*
2103 * Performs necessary checks before doing a write
2104 *
2105 * Can adjust the writing position or the number of bytes to write.
2106 * Returns an appropriate error code that the caller should return,
2107 * or zero if the write should be allowed.
2108 */
2109inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2110{
2111        struct inode *inode = file->f_mapping->host;
2112        unsigned long limit = rlimit(RLIMIT_FSIZE);
2113
2114        if (unlikely(*pos < 0))
2115                return -EINVAL;
2116
2117        if (!isblk) {
2118                /* FIXME: this is for backwards compatibility with 2.4 */
2119                if (file->f_flags & O_APPEND)
2120                        *pos = i_size_read(inode);
2121
2122                if (limit != RLIM_INFINITY) {
2123                        if (*pos >= limit) {
2124                                send_sig(SIGXFSZ, current, 0);
2125                                return -EFBIG;
2126                        }
2127                        if (*count > limit - (typeof(limit))*pos) {
2128                                *count = limit - (typeof(limit))*pos;
2129                        }
2130                }
2131        }
2132
2133        /*
2134         * LFS rule
2135         */
2136        if (unlikely(*pos + *count > MAX_NON_LFS &&
2137                                !(file->f_flags & O_LARGEFILE))) {
2138                if (*pos >= MAX_NON_LFS) {
2139                        return -EFBIG;
2140                }
2141                if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2142                        *count = MAX_NON_LFS - (unsigned long)*pos;
2143                }
2144        }
2145
2146        /*
2147         * Are we about to exceed the fs block limit ?
2148         * Are we about to exceed the fs block limit?
2149         *
2150         * If we have already written data, it becomes a short write.  If we
2151         * have exceeded the limit without writing data, we send a signal and
2152         * return EFBIG.  Linus' "frestrict" idea would clean these up nicely.
2153        if (likely(!isblk)) {
2154                if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2155                        if (*count || *pos > inode->i_sb->s_maxbytes) {
2156                                return -EFBIG;
2157                        }
2158                        /* zero-length writes at ->s_maxbytes are OK */
2159                }
2160
2161                if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2162                        *count = inode->i_sb->s_maxbytes - *pos;
2163        } else {
2164#ifdef CONFIG_BLOCK
2165                loff_t isize;
2166                if (bdev_read_only(I_BDEV(inode)))
2167                        return -EPERM;
2168                isize = i_size_read(inode);
2169                if (*pos >= isize) {
2170                        if (*count || *pos > isize)
2171                                return -ENOSPC;
2172                }
2173
2174                if (*pos + *count > isize)
2175                        *count = isize - *pos;
2176#else
2177                return -EPERM;
2178#endif
2179        }
2180        return 0;
2181}
2182EXPORT_SYMBOL(generic_write_checks);
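
/*
 * Worked example: with RLIMIT_FSIZE at 1MB, an 8KB write at
 * *pos == 1MB - 4KB is trimmed to *count == 4KB (a short write),
 * while the same write starting at *pos == 1MB raises SIGXFSZ and
 * returns -EFBIG.
 */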
2183
2184int pagecache_write_begin(struct file *file, struct address_space *mapping,
2185                                loff_t pos, unsigned len, unsigned flags,
2186                                struct page **pagep, void **fsdata)
2187{
2188        const struct address_space_operations *aops = mapping->a_ops;
2189
2190        return aops->write_begin(file, mapping, pos, len, flags,
2191                                                        pagep, fsdata);
2192}
2193EXPORT_SYMBOL(pagecache_write_begin);
2194
2195int pagecache_write_end(struct file *file, struct address_space *mapping,
2196                                loff_t pos, unsigned len, unsigned copied,
2197                                struct page *page, void *fsdata)
2198{
2199        const struct address_space_operations *aops = mapping->a_ops;
2200
2201        mark_page_accessed(page);
2202        return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2203}
2204EXPORT_SYMBOL(pagecache_write_end);
2205
2206ssize_t
2207generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2208                unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2209                size_t count, size_t ocount)
2210{
2211        struct file     *file = iocb->ki_filp;
2212        struct address_space *mapping = file->f_mapping;
2213        struct inode    *inode = mapping->host;
2214        ssize_t         written;
2215        size_t          write_len;
2216        pgoff_t         end;
2217
2218        if (count != ocount)
2219                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2220
2221        write_len = iov_length(iov, *nr_segs);
2222        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2223
2224        written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2225        if (written)
2226                goto out;
2227
2228        /*
2229         * After a write we want buffered reads to be sure to go to disk to get
2230         * the new data.  We invalidate clean cached page from the region we're
2231         * about to write.  We do this *before* the write so that we can return
2232         * without clobbering -EIOCBQUEUED from ->direct_IO().
2233         */
2234        if (mapping->nrpages) {
2235                written = invalidate_inode_pages2_range(mapping,
2236                                        pos >> PAGE_CACHE_SHIFT, end);
2237                /*
2238                 * If a page cannot be invalidated, return 0 to fall back
2239                 * to buffered write.
2240                 */
2241                if (written) {
2242                        if (written == -EBUSY)
2243                                return 0;
2244                        goto out;
2245                }
2246        }
2247
2248        written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2249
2250        /*
2251         * Finally, try again to invalidate clean pages which might have been
2252         * cached by non-direct readahead, or faulted in by get_user_pages()
2253         * if the source of the write was an mmap'ed region of the file
2254         * we're writing.  Either one is a pretty crazy thing to do,
2255         * so we don't support it 100%.  If this invalidation
2256         * fails, tough, the write still worked...
2257         */
2258        if (mapping->nrpages) {
2259                invalidate_inode_pages2_range(mapping,
2260                                              pos >> PAGE_CACHE_SHIFT, end);
2261        }
2262
2263        if (written > 0) {
2264                pos += written;
2265                if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2266                        i_size_write(inode, pos);
2267                        mark_inode_dirty(inode);
2268                }
2269                *ppos = pos;
2270        }
2271out:
2272        return written;
2273}
2274EXPORT_SYMBOL(generic_file_direct_write);
2275
2276/*
2277 * Find or create a page at the given pagecache position. Return the locked
2278 * page. This function is specifically for buffered writes.
2279 */
2280struct page *grab_cache_page_write_begin(struct address_space *mapping,
2281                                        pgoff_t index, unsigned flags)
2282{
2283        int status;
2284        struct page *page;
2285        gfp_t gfp_notmask = 0;
2286        if (flags & AOP_FLAG_NOFS)
2287                gfp_notmask = __GFP_FS;
2288repeat:
2289        page = find_lock_page(mapping, index);
2290        if (page)
2291                return page;
2292
2293        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2294        if (!page)
2295                return NULL;
2296        status = add_to_page_cache_lru(page, mapping, index,
2297                                                GFP_KERNEL & ~gfp_notmask);
2298        if (unlikely(status)) {
2299                page_cache_release(page);
2300                if (status == -EEXIST)
2301                        goto repeat;
2302                return NULL;
2303        }
2304        return page;
2305}
2306EXPORT_SYMBOL(grab_cache_page_write_begin);
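
/*
 * Usage sketch: a minimal ->write_begin() built on this helper, loosely
 * following block_write_begin(); "foo_write_begin" is illustrative:
 *
 *        static int foo_write_begin(struct file *file,
 *                        struct address_space *mapping, loff_t pos,
 *                        unsigned len, unsigned flags,
 *                        struct page **pagep, void **fsdata)
 *        {
 *                struct page *page;
 *
 *                page = grab_cache_page_write_begin(mapping,
 *                                        pos >> PAGE_CACHE_SHIFT, flags);
 *                if (!page)
 *                        return -ENOMEM;
 *                *pagep = page;
 *                return 0;
 *        }
 */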
2307
2308static ssize_t generic_perform_write(struct file *file,
2309                                struct iov_iter *i, loff_t pos)
2310{
2311        struct address_space *mapping = file->f_mapping;
2312        const struct address_space_operations *a_ops = mapping->a_ops;
2313        long status = 0;
2314        ssize_t written = 0;
2315        unsigned int flags = 0;
2316
2317        /*
2318         * Copies from kernel address space cannot fail (NFSD is a big user).
2319         */
2320        if (segment_eq(get_fs(), KERNEL_DS))
2321                flags |= AOP_FLAG_UNINTERRUPTIBLE;
2322
2323        do {
2324                struct page *page;
2325                unsigned long offset;   /* Offset into pagecache page */
2326                unsigned long bytes;    /* Bytes to write to page */
2327                size_t copied;          /* Bytes copied from user */
2328                void *fsdata;
2329
2330                offset = (pos & (PAGE_CACHE_SIZE - 1));
2331                bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2332                                                iov_iter_count(i));
2333
2334again:
2335
2336                /*
2337                 * Bring in the user page that we will copy from _first_.
2338                 * Otherwise there's a nasty deadlock on copying from the
2339                 * same page as we're writing to, without it being marked
2340                 * up-to-date.
2341                 *
2342                 * Not only is this an optimisation, but it is also required
2343                 * to check that the address is actually valid, when atomic
2344                 * usercopies are used, below.
2345                 */
2346                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2347                        status = -EFAULT;
2348                        break;
2349                }
2350
2351                status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2352                                                &page, &fsdata);
2353                if (unlikely(status))
2354                        break;
2355
2356                if (mapping_writably_mapped(mapping))
2357                        flush_dcache_page(page);
2358
2359                pagefault_disable();
2360                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2361                pagefault_enable();
2362                flush_dcache_page(page);
2363
2364                mark_page_accessed(page);
2365                status = a_ops->write_end(file, mapping, pos, bytes, copied,
2366                                                page, fsdata);
2367                if (unlikely(status < 0))
2368                        break;
2369                copied = status;
2370
2371                cond_resched();
2372
2373                iov_iter_advance(i, copied);
2374                if (unlikely(copied == 0)) {
2375                        /*
2376                         * If we were unable to copy any data at all, we must
2377                         * fall back to a single segment length write.
2378                         *
2379                         * If we didn't fall back here, we could livelock
2380                         * because not all segments in the iov can be copied at
2381                         * once without a pagefault.
2382                         */
2383                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2384                                                iov_iter_single_seg_count(i));
2385                        goto again;
2386                }
2387                pos += copied;
2388                written += copied;
2389
2390                balance_dirty_pages_ratelimited(mapping);
2391
2392        } while (iov_iter_count(i));
2393
2394        return written ? written : status;
2395}
2396
2397ssize_t
2398generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2399                unsigned long nr_segs, loff_t pos, loff_t *ppos,
2400                size_t count, ssize_t written)
2401{
2402        struct file *file = iocb->ki_filp;
2403        ssize_t status;
2404        struct iov_iter i;
2405
2406        iov_iter_init(&i, iov, nr_segs, count, written);
2407        status = generic_perform_write(file, &i, pos);
2408
2409        if (likely(status >= 0)) {
2410                written += status;
2411                *ppos = pos + status;
2412        }
2413
2414        return written ? written : status;
2415}
2416EXPORT_SYMBOL(generic_file_buffered_write);
2417
2418/**
2419 * __generic_file_aio_write - write data to a file
2420 * @iocb:       IO state structure (file, offset, etc.)
2421 * @iov:        vector with data to write
2422 * @nr_segs:    number of segments in the vector
2423 * @ppos:       position where to write
2424 *
2425 * This function does all the work needed for actually writing data to a
2426 * file. It does all basic checks, removes SUID from the file, updates
2427 * modification times and calls proper subroutines depending on whether we
2428 * do direct IO or a standard buffered write.
2429 *
2430 * It expects i_mutex to be grabbed unless we work on a block device or similar
2431 * object which does not need locking at all.
2432 *
2433 * This function does *not* take care of syncing data in case of O_SYNC write.
2434 * A caller has to handle it. This is mainly due to the fact that we want to
2435 * avoid syncing under i_mutex.
2436 */
2437ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2438                                 unsigned long nr_segs, loff_t *ppos)
2439{
2440        struct file *file = iocb->ki_filp;
2441        struct address_space * mapping = file->f_mapping;
2442        size_t ocount;          /* original count */
2443        size_t count;           /* after file limit checks */
2444        struct inode    *inode = mapping->host;
2445        loff_t          pos;
2446        ssize_t         written;
2447        ssize_t         err;
2448
2449        ocount = 0;
2450        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2451        if (err)
2452                return err;
2453
2454        count = ocount;
2455        pos = *ppos;
2456
2457        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2458
2459        /* We can write back this queue in page reclaim */
2460        current->backing_dev_info = mapping->backing_dev_info;
2461        written = 0;
2462
2463        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2464        if (err)
2465                goto out;
2466
2467        if (count == 0)
2468                goto out;
2469
2470        err = file_remove_suid(file);
2471        if (err)
2472                goto out;
2473
2474        file_update_time(file);
2475
2476        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2477        if (unlikely(file->f_flags & O_DIRECT)) {
2478                loff_t endbyte;
2479                ssize_t written_buffered;
2480
2481                written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2482                                                        ppos, count, ocount);
2483                if (written < 0 || written == count)
2484                        goto out;
2485                /*
2486                 * direct-io write to a hole: fall through to buffered I/O
2487                 * for completing the rest of the request.
2488                 */
2489                pos += written;
2490                count -= written;
2491                written_buffered = generic_file_buffered_write(iocb, iov,
2492                                                nr_segs, pos, ppos, count,
2493                                                written);
2494                /*
2495                 * If generic_file_buffered_write() returned a synchronous error
2496                 * then we want to return the number of bytes which were
2497                 * direct-written, or the error code if that was zero.  Note
2498                 * that this differs from normal direct-io semantics, which
2499                 * will return -EFOO even if some bytes were written.
2500                 */
2501                if (written_buffered < 0) {
2502                        err = written_buffered;
2503                        goto out;
2504                }
2505
2506                /*
2507                 * We need to ensure that the page cache pages are written to
2508                 * disk and invalidated to preserve the expected O_DIRECT
2509                 * semantics.
2510                 */
2511                endbyte = pos + written_buffered - written - 1;
2512                err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2513                if (err == 0) {
2514                        written = written_buffered;
2515                        invalidate_mapping_pages(mapping,
2516                                                 pos >> PAGE_CACHE_SHIFT,
2517                                                 endbyte >> PAGE_CACHE_SHIFT);
2518                } else {
2519                        /*
2520                         * We don't know how much we wrote, so just return
2521                         * the number of bytes which were direct-written
2522                         */
2523                }
2524        } else {
2525                written = generic_file_buffered_write(iocb, iov, nr_segs,
2526                                pos, ppos, count, written);
2527        }
2528out:
2529        current->backing_dev_info = NULL;
2530        return written ? written : err;
2531}
2532EXPORT_SYMBOL(__generic_file_aio_write);
2533
2534/**
2535 * generic_file_aio_write - write data to a file
2536 * @iocb:       IO state structure
2537 * @iov:        vector with data to write
2538 * @nr_segs:    number of segments in the vector
2539 * @pos:        position in file where to write
2540 *
2541 * This is a wrapper around __generic_file_aio_write() to be used by most
2542 * filesystems. It takes care of syncing the file in case of O_SYNC file
2543 * and acquires i_mutex as needed.
2544 */
2545ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2546                unsigned long nr_segs, loff_t pos)
2547{
2548        struct file *file = iocb->ki_filp;
2549        struct inode *inode = file->f_mapping->host;
2550        struct blk_plug plug;
2551        ssize_t ret;
2552
2553        BUG_ON(iocb->ki_pos != pos);
2554
2555        mutex_lock(&inode->i_mutex);
2556        blk_start_plug(&plug);
2557        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2558        mutex_unlock(&inode->i_mutex);
2559
2560        if (ret > 0 || ret == -EIOCBQUEUED) {
2561                ssize_t err;
2562
2563                err = generic_write_sync(file, pos, ret);
2564                if (err < 0 && ret > 0)
2565                        ret = err;
2566        }
2567        blk_finish_plug(&plug);
2568        return ret;
2569}
2570EXPORT_SYMBOL(generic_file_aio_write);
2571
2572/**
2573 * try_to_release_page() - release old fs-specific metadata on a page
2574 *
2575 * @page: the page which the kernel is trying to free
2576 * @gfp_mask: memory allocation flags (and I/O mode)
2577 *
2578 * The address_space is asked to release any data it holds against the page
2579 * (presumably at page->private).  If the release was successful, return 1.
2580 * Otherwise return zero.
2581 *
2582 * This may also be called if PG_fscache is set on a page, indicating that the
2583 * page is known to the local caching routines.
2584 *
2585 * The @gfp_mask argument specifies whether I/O may be performed to release
2586 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT and __GFP_FS).
2587 *
2588 */
2589int try_to_release_page(struct page *page, gfp_t gfp_mask)
2590{
2591        struct address_space * const mapping = page->mapping;
2592
2593        BUG_ON(!PageLocked(page));
2594        if (PageWriteback(page))
2595                return 0;
2596
2597        if (mapping && mapping->a_ops->releasepage)
2598                return mapping->a_ops->releasepage(page, gfp_mask);
2599        return try_to_free_buffers(page);
2600}
2601
2602EXPORT_SYMBOL(try_to_release_page);
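
/*
 * Usage sketch: reclaim-style callers check for private data first and
 * treat failure as "keep the page" (illustrative, after the pattern in
 * vmscan.c):
 *
 *        if (page_has_private(page) &&
 *            !try_to_release_page(page, GFP_KERNEL))
 *                goto keep_page;
 */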
2603