/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
                                pgoff_t index, void *entry)
{
        struct radix_tree_node *node;
        void **slot;

        if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
                return;
        if (*slot != entry)
                return;
        __radix_tree_replace(&mapping->i_pages, node, slot, NULL,
                             workingset_update_node);
        mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                               void *entry)
{
        xa_lock_irq(&mapping->i_pages);
        __clear_shadow_entry(mapping, index, entry);
        xa_unlock_irq(&mapping->i_pages);
}

/*
 * Unconditionally remove exceptional entries. Usually called from the
 * truncate path. Note that the pagevec may be altered by this function by
 * removing exceptional entries, similar to what pagevec_remove_exceptionals()
 * does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                                struct pagevec *pvec, pgoff_t *indices,
                                pgoff_t end)
{
        int i, j;
        bool dax, lock;

        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;

        for (j = 0; j < pagevec_count(pvec); j++)
                if (radix_tree_exceptional_entry(pvec->pages[j]))
                        break;

        if (j == pagevec_count(pvec))
                return;

        dax = dax_mapping(mapping);
        lock = !dax && indices[j] < end;
        if (lock)
                xa_lock_irq(&mapping->i_pages);

        for (i = j; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                pgoff_t index = indices[i];

                if (!radix_tree_exceptional_entry(page)) {
                        pvec->pages[j++] = page;
                        continue;
                }

                if (index >= end)
                        continue;

                if (unlikely(dax)) {
                        dax_delete_mapping_entry(mapping, index);
                        continue;
                }

                __clear_shadow_entry(mapping, index, page);
        }

        if (lock)
                xa_unlock_irq(&mapping->i_pages);
        pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        /* Handled by shmem itself, or for DAX we do nothing. */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
                                         pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return 1;
        if (dax_mapping(mapping))
                return dax_invalidate_mapping_entry_sync(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
                       unsigned int length)
{
        void (*invalidatepage)(struct page *, unsigned int, unsigned int);

        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset, length);
}
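
/*
 * Illustrative sketch (not part of the original file): a filesystem with
 * per-page private data hooks into the dispatch above by filling in
 * ->invalidatepage in its address_space_operations.  "examplefs" and its
 * helper below are hypothetical names.
 */
#if 0   /* example only */
static void examplefs_invalidatepage(struct page *page, unsigned int offset,
                                     unsigned int length)
{
        /* Drop only the private state covering [offset, offset + length). */
        examplefs_forget_range(page, offset, length);
}

static const struct address_space_operations examplefs_aops = {
        .invalidatepage = examplefs_invalidatepage,
        /* ... other methods ... */
};
#endif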

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
        if (page_mapped(page)) {
                pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
                unmap_mapping_pages(mapping, page->index, nr, false);
        }

        if (page_has_private(page))
                do_invalidatepage(page, 0, PAGE_SIZE);

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (e.g. ext3 journaling).
         * Hence the dirty accounting check is placed after invalidation.
         */
        cancel_dirty_page(page);
        ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);

        if (page->mapping != mapping)
                return -EIO;

        truncate_cleanup_page(mapping, page);
        delete_from_page_cache(page);
        return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
        if (!mapping)
                return -EINVAL;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
        return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        if (!mapping)
                return 0;
        if (PageDirty(page) || PageWriteback(page))
                return 0;
        if (page_mapped(page))
                return 0;
        return invalidate_complete_page(mapping, page);
}
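
/*
 * Usage sketch (illustrative, not from this file): soft page offlining
 * prefers this over migration, since dropping a clean cached copy is
 * cheap.  Per the comment above, the caller holds the page lock.
 */
#if 0
        lock_page(page);
        ret = invalidate_inode_page(page);
        unlock_page(page);
#endif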

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        pgoff_t         start;          /* inclusive */
        pgoff_t         end;            /* exclusive */
        unsigned int    partial_start;  /* inclusive */
        unsigned int    partial_end;    /* exclusive */
        struct pagevec  pvec;
        pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                goto out;

        /* Offsets within partial pages */
        partial_start = lstart & (PAGE_SIZE - 1);
        partial_end = (lend + 1) & (PAGE_SIZE - 1);

        /*
         * 'start' and 'end' always cover the range of pages to be fully
         * truncated. Partial pages are covered with 'partial_start' at the
         * start of the range and 'partial_end' at the end of the range.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
                 * to the highest possible pgoff_t and since the type is
                 * unsigned we're using -1.
                 */
                end = -1;
        else
                end = (lend + 1) >> PAGE_SHIFT;

        pagevec_init(&pvec);
        index = start;
        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        indices)) {
                /*
                 * The pagevec may contain exceptional entries and we may
                 * also fail to lock some pages, so we store the pages that
                 * can be deleted in a separate pagevec.
                 */
                struct pagevec locked_pvec;

                pagevec_init(&locked_pvec);
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page))
                                continue;

                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page_to_index(page) != index);
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        pagevec_add(&locked_pvec, page);
                }
                for (i = 0; i < pagevec_count(&locked_pvec); i++)
                        truncate_cleanup_page(mapping, locked_pvec.pages[i]);
                delete_from_page_cache_batch(mapping, &locked_pvec);
                for (i = 0; i < pagevec_count(&locked_pvec); i++)
                        unlock_page(locked_pvec.pages[i]);
                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                /* Truncation within a single page */
                                top = partial_end;
                                partial_end = 0;
                        }
                        wait_on_page_writeback(page);
                        zero_user_segment(page, partial_start, top);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, partial_start,
                                                  top - partial_start);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = find_lock_page(mapping, end);
                if (page) {
                        wait_on_page_writeback(page);
                        zero_user_segment(page, 0, partial_end);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, 0,
                                                  partial_end);
                        unlock_page(page);
                        put_page(page);
                }
        }
        /*
         * If the truncation happened within a single page no pages
         * will be released, just zeroed, so we can bail out now.
         */
        if (start >= end)
                goto out;

        index = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
                        /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
                        /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
                }

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end) {
                                /* Restart punch to make sure all gone */
                                index = start - 1;
                                break;
                        }

                        if (radix_tree_exceptional_entry(page))
                                continue;

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                index++;
        }

out:
        cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
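
/*
 * Usage sketch (illustrative, assuming 4K pages): truncating the byte range
 * [512, 8191] removes page 1 (bytes 4096-8191) entirely via the batched
 * pass above, and zeroes bytes 512-4095 of page 0 via the partial_start
 * path.
 */
#if 0
        truncate_inode_pages_range(inode->i_mapping, 512, 8191);
#endif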

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
        unsigned long nrexceptional;
        unsigned long nrpages;

        /*
         * Page reclaim cannot participate in regular inode lifetime
         * management (it can't call iput()) and thus can race with the
         * inode teardown.  Tell it when the address space is exiting,
         * so that it does not install eviction information after the
         * final truncate has begun.
         */
        mapping_set_exiting(mapping);

        /*
         * When reclaim installs eviction entries, it increases
         * nrexceptional first, then decreases nrpages.  Make sure we see
         * this in the right order or we might miss an entry.
         */
        nrpages = mapping->nrpages;
        smp_rmb();
        nrexceptional = mapping->nrexceptional;

        if (nrpages || nrexceptional) {
                /*
                 * As truncation uses a lockless tree lookup, cycle
                 * the tree lock to make sure any ongoing tree
                 * modification that does not see AS_EXITING is
                 * completed before starting the final truncate.
                 */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);

                truncate_inode_pages(mapping, 0);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_final);
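
/*
 * Illustrative sketch (not from this file): the common .evict_inode shape
 * that ends with the final truncate.  "examplefs" is a hypothetical name;
 * the two calls are the standard ones.
 */
#if 0
static void examplefs_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}
#endif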

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned long ret;
        unsigned long count = 0;
        int i;

        pagevec_init(&pvec);
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                invalidate_exceptional_entry(mapping, index,
                                                             page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;

                        WARN_ON(page_to_index(page) != index);

                        /* Middle of THP: skip */
                        if (PageTransTail(page)) {
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                                /*
                                 * 'end' is in the middle of the THP. Don't
                                 * invalidate the page as the part outside of
                                 * 'end' could still be useful.
                                 */
                                if (index > end) {
                                        unlock_page(page);
                                        continue;
                                }
                        }

                        ret = invalidate_inode_page(page);
                        unlock_page(page);
                        /*
                         * Invalidation is a hint that the page is no longer
                         * of interest, so try to speed up its reclaim.
                         */
                        if (!ret)
                                deactivate_file_page(page);
                        count += ret;
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
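
/*
 * Usage sketch (illustrative): drop every clean, unmapped, unlocked page of
 * an inode; a pgoff_t of -1 means "through the last page".  The return
 * value only counts pages actually freed, so treat it as a hint.
 */
#if 0
        unsigned long nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);
#endif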

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        unsigned long flags;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        xa_lock_irqsave(&mapping->i_pages, flags);
        if (PageDirty(page))
                goto failed;

        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);

        put_page(page); /* pagecache ref */
        return 1;
failed:
        xa_unlock_irqrestore(&mapping->i_pages, flags);
        return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
        if (!PageDirty(page))
                return 0;
        if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
                return 0;
        return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index;
        int i;
        int ret = 0;
        int ret2 = 0;
        int did_range_unmap = 0;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                goto out;

        pagevec_init(&pvec);
        index = start;
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (!invalidate_exceptional_entry2(mapping,
                                                                   index, page))
                                        ret = -EBUSY;
                                continue;
                        }

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        wait_on_page_writeback(page);
                        if (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_pages(mapping, index,
                                                (1 + end - index), false);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_pages(mapping, index,
                                                                1, false);
                                }
                        }
                        BUG_ON(page_mapped(page));
                        ret2 = do_launder_page(mapping, page);
                        if (ret2 == 0) {
                                if (!invalidate_complete_page2(mapping, page))
                                        ret2 = -EBUSY;
                        }
                        if (ret2 < 0)
                                ret = ret2;
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        /*
         * For DAX we invalidate page tables after invalidating the radix
         * tree.  We could invalidate page tables while invalidating each
         * entry, but that would be expensive.  And doing range unmapping
         * up front doesn't work, as we have no cheap way to find out
         * whether a radix tree entry got remapped later.
         */
        if (dax_mapping(mapping)) {
                unmap_mapping_pages(mapping, start, end - start + 1, false);
        }
out:
        cleancache_invalidate_inode(mapping);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
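
/*
 * Usage sketch (illustrative): a direct-IO write path typically flushes and
 * then invalidates the affected range so that later buffered reads don't
 * see stale cache.  "pos" and "count" are illustrative byte offsets.
 */
#if 0
        err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
        if (!err)
                err = invalidate_inode_pages2_range(mapping,
                                pos >> PAGE_SHIFT,
                                (pos + count - 1) >> PAGE_SHIFT);
#endif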

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /*
         * unmap_mapping_range is called twice, first simply for
         * efficiency so that truncate_inode_pages does fewer
         * single-page unmaps.  However after this first call, and
         * before truncate_inode_pages finishes, it is possible for
         * private pages to be COWed, which remain after
         * truncate_inode_pages finishes, hence the second
         * unmap_mapping_range call must be made for correctness.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        if (newsize > oldsize)
                pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
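
/*
 * Illustrative sketch (not from this file) of the setattr path described
 * above.  "examplefs" and its block truncation helper are hypothetical;
 * the surrounding calls are the standard ones.
 */
#if 0
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        err = setattr_prepare(dentry, attr);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
                truncate_setsize(inode, attr->ia_size);
                examplefs_truncate_blocks(inode, attr->ia_size);
        }
        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
#endif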

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:      inode for which i_size was extended
 * @from:       original inode size
 * @to:         new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * a write starting after the current i_size. We mark the page straddling the
 * current i_size RO so that page_mkwrite() is called on the nearest write
 * access to the page.  This way the filesystem can be sure that
 * page_mkwrite() is called on the page before a user writes to the page via
 * mmap after the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the
 * new i_size value before we are prepared to store mmap writes at the new
 * inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
        int bsize = i_blocksize(inode);
        loff_t rounded_from;
        struct page *page;
        pgoff_t index;

        WARN_ON(to > inode->i_size);

        if (from >= to || bsize == PAGE_SIZE)
                return;
        /* Nothing to do if no hole block is created in the page straddling @from */
        rounded_from = round_up(from, bsize);
        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;

        index = from >> PAGE_SHIFT;
        page = find_lock_page(inode->i_mapping, index);
        /* Page not cached? Nothing to do */
        if (!page)
                return;
        /*
         * See clear_page_dirty_for_io() for details why set_page_dirty()
         * is needed.
         */
        if (page_mkclean(page))
                set_page_dirty(page);
        unlock_page(page);
        put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
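
/*
 * Usage sketch (illustrative): a buffered write that extends the file on a
 * blocksize < PAGE_SIZE filesystem updates i_size first, then lets the
 * helper above write-protect the straddling page.  "pos" and "copied" are
 * illustrative.
 */
#if 0
        loff_t oldsize = inode->i_size;

        i_size_write(inode, pos + copied);
        pagecache_isize_extended(inode, oldsize, pos + copied);
#endif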

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
        /*
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
         * doing their own page rounding first.  Note that unmap_mapping_range
         * allows holelen 0 for all, and we allow lend -1 for end of file.
         */

        /*
         * Unlike in truncate_pagecache, unmap_mapping_range is called only
         * once (before truncating pagecache), and without "even_cows" flag:
         * hole-punching should not remove private COWed pages from the hole.
         */
        if ((u64)unmap_end > (u64)unmap_start)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
        truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
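
/*
 * Usage sketch (illustrative): a FALLOC_FL_PUNCH_HOLE implementation drops
 * the cached pages for the hole before freeing the blocks; note that @lend
 * is inclusive.  The "examplefs" helper is hypothetical.
 */
#if 0
        truncate_pagecache_range(inode, offset, offset + len - 1);
        examplefs_free_blocks(inode, offset, len);
#endif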