linux/mm/truncate.c
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                               void *entry)
{
        struct radix_tree_node *node;
        void **slot;

        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
                goto unlock;
        if (*slot != entry)
                goto unlock;
        __radix_tree_replace(&mapping->page_tree, node, slot, NULL,
                             workingset_update_node, mapping);
        mapping->nrexceptional--;
unlock:
        spin_unlock_irq(&mapping->tree_lock);
}

/*
 * Unconditionally remove exceptional entry. Usually called from truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
                                       pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;

        if (dax_mapping(mapping)) {
                dax_delete_mapping_entry(mapping, index);
                return;
        }
        clear_shadow_entry(mapping, index, entry);
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
 * clean entries.
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return 1;
        if (dax_mapping(mapping))
                return dax_invalidate_mapping_entry(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
                                         pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return 1;
        if (dax_mapping(mapping))
                return dax_invalidate_mapping_entry_sync(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
                       unsigned int length)
{
        void (*invalidatepage)(struct page *, unsigned int, unsigned int);

        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset, length);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return -EIO;

        if (page_has_private(page))
                do_invalidatepage(page, 0, PAGE_SIZE);

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (eg ext3 journaling).
         * Hence dirty accounting check is placed after invalidation.
         */
        cancel_dirty_page(page);
        ClearPageMappedToDisk(page);
        delete_from_page_cache(page);
        return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
        loff_t holelen;
        VM_BUG_ON_PAGE(PageTail(page), page);

        holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
        if (page_mapped(page)) {
                unmap_mapping_range(mapping,
                                   (loff_t)page->index << PAGE_SHIFT,
                                   holelen, 0);
        }
        return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
        if (!mapping)
                return -EINVAL;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
        return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
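
/*
 * Example: memory-failure handling reaches this function through the
 * ->error_remove_page hook, so a filesystem that wants poisoned pagecache
 * pages dropped simply wires it up in its address_space_operations.  The
 * "foo" aops below are a hypothetical sketch; only the .error_remove_page
 * line is the point.
 *
 *      static const struct address_space_operations foo_aops = {
 *              .readpage          = foo_readpage,
 *              .writepage         = foo_writepage,
 *              .error_remove_page = generic_error_remove_page,
 *      };
 */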

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        if (!mapping)
                return 0;
        if (PageDirty(page) || PageWriteback(page))
                return 0;
        if (page_mapped(page))
                return 0;
        return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        pgoff_t         start;          /* inclusive */
        pgoff_t         end;            /* exclusive */
        unsigned int    partial_start;  /* inclusive */
        unsigned int    partial_end;    /* exclusive */
        struct pagevec  pvec;
        pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;

        cleancache_invalidate_inode(mapping);
        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                return;

        /* Offsets within partial pages */
        partial_start = lstart & (PAGE_SIZE - 1);
        partial_end = (lend + 1) & (PAGE_SIZE - 1);

        /*
         * 'start' and 'end' always cover the range of pages to be fully
         * truncated. Partial pages are covered with 'partial_start' at the
         * start of the range and 'partial_end' at the end of the range.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
                 * to the highest possible pgoff_t and since the type is
                 * unsigned we're using -1.
                 */
                end = -1;
        else
                end = (lend + 1) >> PAGE_SHIFT;

        pagevec_init(&pvec, 0);
        index = start;
        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                truncate_exceptional_entry(mapping, index,
                                                           page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page_to_index(page) != index);
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                /* Truncation within a single page */
                                top = partial_end;
                                partial_end = 0;
                        }
                        wait_on_page_writeback(page);
                        zero_user_segment(page, partial_start, top);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, partial_start,
                                                  top - partial_start);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = find_lock_page(mapping, end);
                if (page) {
                        wait_on_page_writeback(page);
                        zero_user_segment(page, 0, partial_end);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, 0,
                                                  partial_end);
                        unlock_page(page);
                        put_page(page);
                }
        }
        /*
         * If the truncation happened within a single page no pages
         * will be released, just zeroed, so we can bail out now.
         */
        if (start >= end)
                return;

        index = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
                        /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
                        /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end) {
                                /* Restart punch to make sure all gone */
                                index = start - 1;
                                break;
                        }

                        if (radix_tree_exceptional_entry(page)) {
                                truncate_exceptional_entry(mapping, index,
                                                           page);
                                continue;
                        }

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }
        cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
        unsigned long nrexceptional;
        unsigned long nrpages;

        /*
         * Page reclaim can not participate in regular inode lifetime
         * management (can't call iput()) and thus can race with the
         * inode teardown.  Tell it when the address space is exiting,
         * so that it does not install eviction information after the
         * final truncate has begun.
         */
        mapping_set_exiting(mapping);

        /*
         * When reclaim installs eviction entries, it increases
         * nrexceptional first, then decreases nrpages.  Make sure we see
         * this in the right order or we might miss an entry.
         */
        nrpages = mapping->nrpages;
        smp_rmb();
        nrexceptional = mapping->nrexceptional;

        if (nrpages || nrexceptional) {
                /*
                 * As truncation uses a lockless tree lookup, cycle
                 * the tree lock to make sure any ongoing tree
                 * modification that does not see AS_EXITING is
                 * completed before starting the final truncate.
                 */
                spin_lock_irq(&mapping->tree_lock);
                spin_unlock_irq(&mapping->tree_lock);

                truncate_inode_pages(mapping, 0);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_final);
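
/*
 * Example: the usual shape of a filesystem's ->evict_inode, which must call
 * truncate_inode_pages_final() before the inode goes away.  The "foo" names
 * are hypothetical; the truncate_inode_pages_final()/clear_inode() pairing
 * is the part this sketch illustrates.
 *
 *      static void foo_evict_inode(struct inode *inode)
 *      {
 *              truncate_inode_pages_final(&inode->i_data);
 *              clear_inode(inode);
 *              if (!inode->i_nlink)
 *                      foo_free_inode_blocks(inode);
 *      }
 */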

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes only the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned long ret;
        unsigned long count = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                invalidate_exceptional_entry(mapping, index,
                                                             page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;

                        WARN_ON(page_to_index(page) != index);

                        /* Middle of THP: skip */
                        if (PageTransTail(page)) {
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                                /* 'end' is in the middle of THP */
                                if (index ==  round_down(end, HPAGE_PMD_NR))
                                        continue;
                        }

                        ret = invalidate_inode_page(page);
                        unlock_page(page);
                        /*
                         * Invalidation is a hint that the page is no longer
                         * of interest, so try to speed up its reclaim.
                         */
                        if (!ret)
                                deactivate_file_page(page);
                        count += ret;
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
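
/*
 * Example: callers such as the drop_caches code invalidate an entire mapping
 * by passing the full page range; end == -1 is the largest possible pgoff_t.
 * Dirty, locked, mapped and writeback pages are silently skipped, so this is
 * only a best-effort hint.
 *
 *      invalidate_mapping_pages(inode->i_mapping, 0, -1);
 */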

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        unsigned long flags;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (PageDirty(page))
                goto failed;

        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);

        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);

        put_page(page); /* pagecache ref */
        return 1;
failed:
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
        return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
        if (!PageDirty(page))
                return 0;
        if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
                return 0;
        return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index;
        int i;
        int ret = 0;
        int ret2 = 0;
        int did_range_unmap = 0;

        cleancache_invalidate_inode(mapping);
        pagevec_init(&pvec, 0);
        index = start;
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (!invalidate_exceptional_entry2(mapping,
                                                                   index, page))
                                        ret = -EBUSY;
                                continue;
                        }

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        wait_on_page_writeback(page);
                        if (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                           (loff_t)index << PAGE_SHIFT,
                                           (loff_t)(1 + end - index)
                                                         << PAGE_SHIFT,
                                                         0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                           (loff_t)index << PAGE_SHIFT,
                                           PAGE_SIZE, 0);
                                }
                        }
                        BUG_ON(page_mapped(page));
                        ret2 = do_launder_page(mapping, page);
                        if (ret2 == 0) {
                                if (!invalidate_complete_page2(mapping, page))
                                        ret2 = -EBUSY;
                        }
                        if (ret2 < 0)
                                ret = ret2;
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        cleancache_invalidate_inode(mapping);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /*
         * unmap_mapping_range is called twice, first simply for
         * efficiency so that truncate_inode_pages does fewer
         * single-page unmaps.  However after this first call, and
         * before truncate_inode_pages finishes, it is possible for
         * private pages to be COWed, which remain after
         * truncate_inode_pages finishes, hence the second
         * unmap_mapping_range call must be made for correctness.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        if (newsize > oldsize)
                pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
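
/*
 * Example: how a simple filesystem's ->setattr would use truncate_setsize()
 * before trimming its on-disk blocks.  The "foo" helpers are hypothetical;
 * the point is that i_size and the pagecache are updated first, and the
 * block truncation follows.
 *
 *      static int foo_setattr(struct dentry *dentry, struct iattr *attr)
 *      {
 *              struct inode *inode = d_inode(dentry);
 *              int error;
 *
 *              error = setattr_prepare(dentry, attr);
 *              if (error)
 *                      return error;
 *
 *              if ((attr->ia_valid & ATTR_SIZE) &&
 *                  attr->ia_size != i_size_read(inode)) {
 *                      truncate_setsize(inode, attr->ia_size);
 *                      foo_truncate_blocks(inode, attr->ia_size);
 *              }
 *
 *              setattr_copy(inode, attr);
 *              mark_inode_dirty(inode);
 *              return 0;
 *      }
 */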

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:      inode for which i_size was extended
 * @from:       original inode size
 * @to:         new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
        int bsize = i_blocksize(inode);
        loff_t rounded_from;
        struct page *page;
        pgoff_t index;

        WARN_ON(to > inode->i_size);

        if (from >= to || bsize == PAGE_SIZE)
                return;
        /* Page straddling @from will not have any hole block created? */
        rounded_from = round_up(from, bsize);
        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;

        index = from >> PAGE_SHIFT;
        page = find_lock_page(inode->i_mapping, index);
        /* Page not cached? Nothing to do */
        if (!page)
                return;
        /*
         * See clear_page_dirty_for_io() for details why set_page_dirty()
         * is needed.
         */
        if (page_mkclean(page))
                set_page_dirty(page);
        unlock_page(page);
        put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
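
/*
 * Example: the write path of a block size < page size filesystem, after an
 * extending buffered write has copied its data (roughly the ->write_end
 * pattern).  The old_size/pos/copied plumbing is a hypothetical sketch; the
 * ordering - publish the new i_size, then call pagecache_isize_extended() -
 * is what matters.
 *
 *      old_size = inode->i_size;
 *      if (pos + copied > inode->i_size)
 *              i_size_write(inode, pos + copied);
 *      unlock_page(page);
 *      put_page(page);
 *      if (old_size < pos)
 *              pagecache_isize_extended(inode, old_size, pos);
 */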

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
        /*
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
         * doing their own page rounding first.  Note that unmap_mapping_range
         * allows holelen 0 for all, and we allow lend -1 for end of file.
         */

        /*
         * Unlike in truncate_pagecache, unmap_mapping_range is called only
         * once (before truncating pagecache), and without "even_cows" flag:
         * hole-punching should not remove private COWed pages from the hole.
         */
        if ((u64)unmap_end > (u64)unmap_start)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
        truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);

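/*
 * Example: the pagecache side of a hole-punching fallocate implementation.
 * Dirty data in the range is written back and waited on first, then the
 * cached pages are dropped before the on-disk blocks are freed.  The
 * foo_free_blocks() call is hypothetical; offset/len are byte granular and
 * lend is inclusive, hence the "offset + len - 1".
 *
 *      error = filemap_write_and_wait_range(inode->i_mapping, offset,
 *                                           offset + len - 1);
 *      if (error)
 *              return error;
 *      truncate_pagecache_range(inode, offset, offset + len - 1);
 *      foo_free_blocks(inode, offset, len);
 */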