linux/mm/truncate.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/truncate.c - code for taking down pages from address_spaces
   4 *
   5 * Copyright (C) 2002, Linus Torvalds
   6 *
   7 * 10Sep2002    Andrew Morton
   8 *              Initial version.
   9 */
  10
  11#include <linux/kernel.h>
  12#include <linux/backing-dev.h>
  13#include <linux/dax.h>
  14#include <linux/gfp.h>
  15#include <linux/mm.h>
  16#include <linux/swap.h>
  17#include <linux/export.h>
  18#include <linux/pagemap.h>
  19#include <linux/highmem.h>
  20#include <linux/pagevec.h>
  21#include <linux/task_io_accounting_ops.h>
  22#include <linux/buffer_head.h>  /* grr. try_to_release_page,
  23                                   do_invalidatepage */
  24#include <linux/shmem_fs.h>
  25#include <linux/cleancache.h>
  26#include <linux/rmap.h>
  27#include "internal.h"
  28
  29/*
  30 * Regular page slots are stabilized by the page lock even without the tree
   31 * itself locked.  Exceptional (shadow) entries have no page to lock, so they
   32 * need verification under the tree lock.
  33 */
  34static inline void __clear_shadow_entry(struct address_space *mapping,
  35                                pgoff_t index, void *entry)
  36{
  37        XA_STATE(xas, &mapping->i_pages, index);
  38
  39        xas_set_update(&xas, workingset_update_node);
  40        if (xas_load(&xas) != entry)
  41                return;
  42        xas_store(&xas, NULL);
  43        mapping->nrexceptional--;
  44}
  45
  46static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  47                               void *entry)
  48{
  49        xa_lock_irq(&mapping->i_pages);
  50        __clear_shadow_entry(mapping, index, entry);
  51        xa_unlock_irq(&mapping->i_pages);
  52}
  53
  54/*
   55 * Unconditionally remove exceptional entries. Usually called from the truncate
   56 * path. Note that this function may alter the pagevec by removing exceptional
   57 * entries, similar to what pagevec_remove_exceptionals() does.
  58 */
  59static void truncate_exceptional_pvec_entries(struct address_space *mapping,
  60                                struct pagevec *pvec, pgoff_t *indices,
  61                                pgoff_t end)
  62{
  63        int i, j;
  64        bool dax, lock;
  65
  66        /* Handled by shmem itself */
  67        if (shmem_mapping(mapping))
  68                return;
  69
  70        for (j = 0; j < pagevec_count(pvec); j++)
  71                if (xa_is_value(pvec->pages[j]))
  72                        break;
  73
  74        if (j == pagevec_count(pvec))
  75                return;
  76
  77        dax = dax_mapping(mapping);
  78        lock = !dax && indices[j] < end;
  79        if (lock)
  80                xa_lock_irq(&mapping->i_pages);
  81
  82        for (i = j; i < pagevec_count(pvec); i++) {
  83                struct page *page = pvec->pages[i];
  84                pgoff_t index = indices[i];
  85
  86                if (!xa_is_value(page)) {
  87                        pvec->pages[j++] = page;
  88                        continue;
  89                }
  90
  91                if (index >= end)
  92                        continue;
  93
  94                if (unlikely(dax)) {
  95                        dax_delete_mapping_entry(mapping, index);
  96                        continue;
  97                }
  98
  99                __clear_shadow_entry(mapping, index, page);
 100        }
 101
 102        if (lock)
 103                xa_unlock_irq(&mapping->i_pages);
 104        pvec->nr = j;
 105}
 106
 107/*
 108 * Invalidate exceptional entry if easily possible. This handles exceptional
 109 * entries for invalidate_inode_pages().
 110 */
 111static int invalidate_exceptional_entry(struct address_space *mapping,
 112                                        pgoff_t index, void *entry)
 113{
 114        /* Handled by shmem itself, or for DAX we do nothing. */
 115        if (shmem_mapping(mapping) || dax_mapping(mapping))
 116                return 1;
 117        clear_shadow_entry(mapping, index, entry);
 118        return 1;
 119}
 120
 121/*
 122 * Invalidate exceptional entry if clean. This handles exceptional entries for
 123 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 124 */
 125static int invalidate_exceptional_entry2(struct address_space *mapping,
 126                                         pgoff_t index, void *entry)
 127{
 128        /* Handled by shmem itself */
 129        if (shmem_mapping(mapping))
 130                return 1;
 131        if (dax_mapping(mapping))
 132                return dax_invalidate_mapping_entry_sync(mapping, index);
 133        clear_shadow_entry(mapping, index, entry);
 134        return 1;
 135}
 136
 137/**
 138 * do_invalidatepage - invalidate part or all of a page
 139 * @page: the page which is affected
 140 * @offset: start of the range to invalidate
 141 * @length: length of the range to invalidate
 142 *
 143 * do_invalidatepage() is called when all or part of the page has become
 144 * invalidated by a truncate operation.
 145 *
 146 * do_invalidatepage() does not have to release all buffers, but it must
 147 * ensure that no dirty buffer is left outside @offset and that no I/O
 148 * is underway against any of the blocks which are outside the truncation
  149 * point, because the caller is about to free (and possibly reuse) those
  150 * blocks on-disk.
 151 */
 152void do_invalidatepage(struct page *page, unsigned int offset,
 153                       unsigned int length)
 154{
 155        void (*invalidatepage)(struct page *, unsigned int, unsigned int);
 156
 157        invalidatepage = page->mapping->a_ops->invalidatepage;
 158#ifdef CONFIG_BLOCK
 159        if (!invalidatepage)
 160                invalidatepage = block_invalidatepage;
 161#endif
 162        if (invalidatepage)
 163                (*invalidatepage)(page, offset, length);
 164}
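
/*
 * A minimal sketch of how a block-based filesystem might wire up
 * ->invalidatepage so that do_invalidatepage() above finds it.  The
 * "examplefs" names are hypothetical; a real filesystem would do its own
 * metadata cleanup before falling back to block_invalidatepage().
 */
static void examplefs_invalidatepage(struct page *page, unsigned int offset,
				     unsigned int length)
{
	/* Filesystem-private cleanup (journal hooks etc.) would go here. */
	block_invalidatepage(page, offset, length);
}

static const struct address_space_operations examplefs_aops = {
	.invalidatepage	= examplefs_invalidatepage,
};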
 165
 166/*
 167 * If truncate cannot remove the fs-private metadata from the page, the page
 168 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 169 * user pagetables if we're racing with filemap_fault().
 170 *
 171 * We need to bail out if page->mapping is no longer equal to the original
 172 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 173 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 174 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 175 */
 176static void
 177truncate_cleanup_page(struct address_space *mapping, struct page *page)
 178{
 179        if (page_mapped(page)) {
 180                unsigned int nr = thp_nr_pages(page);
 181                unmap_mapping_pages(mapping, page->index, nr, false);
 182        }
 183
 184        if (page_has_private(page))
 185                do_invalidatepage(page, 0, thp_size(page));
 186
 187        /*
 188         * Some filesystems seem to re-dirty the page even after
 189         * the VM has canceled the dirty bit (eg ext3 journaling).
 190         * Hence dirty accounting check is placed after invalidation.
 191         */
 192        cancel_dirty_page(page);
 193        ClearPageMappedToDisk(page);
 194}
 195
 196/*
 197 * This is for invalidate_mapping_pages().  That function can be called at
 198 * any time, and is not supposed to throw away dirty pages.  But pages can
 199 * be marked dirty at any time too, so use remove_mapping which safely
 200 * discards clean, unused pages.
 201 *
 202 * Returns non-zero if the page was successfully invalidated.
 203 */
 204static int
 205invalidate_complete_page(struct address_space *mapping, struct page *page)
 206{
 207        int ret;
 208
 209        if (page->mapping != mapping)
 210                return 0;
 211
 212        if (page_has_private(page) && !try_to_release_page(page, 0))
 213                return 0;
 214
 215        ret = remove_mapping(mapping, page);
 216
 217        return ret;
 218}
 219
 220int truncate_inode_page(struct address_space *mapping, struct page *page)
 221{
 222        VM_BUG_ON_PAGE(PageTail(page), page);
 223
 224        if (page->mapping != mapping)
 225                return -EIO;
 226
 227        truncate_cleanup_page(mapping, page);
 228        delete_from_page_cache(page);
 229        return 0;
 230}
 231
 232/*
  233 * Used to get rid of pages affected by hardware memory corruption.
 234 */
 235int generic_error_remove_page(struct address_space *mapping, struct page *page)
 236{
 237        if (!mapping)
 238                return -EINVAL;
 239        /*
 240         * Only punch for normal data pages for now.
 241         * Handling other types like directories would need more auditing.
 242         */
 243        if (!S_ISREG(mapping->host->i_mode))
 244                return -EIO;
 245        return truncate_inode_page(mapping, page);
 246}
 247EXPORT_SYMBOL(generic_error_remove_page);
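
/*
 * A minimal sketch of the usual opt-in: filesystems point ->error_remove_page
 * at generic_error_remove_page() in their address_space_operations so the
 * memory-failure code can drop poisoned pagecache pages.  The structure name
 * is hypothetical.
 */
static const struct address_space_operations examplefs_error_aops = {
	.error_remove_page	= generic_error_remove_page,
};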
 248
 249/*
 250 * Safely invalidate one page from its pagecache mapping.
 251 * It only drops clean, unused pages. The page must be locked.
 252 *
 253 * Returns 1 if the page is successfully invalidated, otherwise 0.
 254 */
 255int invalidate_inode_page(struct page *page)
 256{
 257        struct address_space *mapping = page_mapping(page);
 258        if (!mapping)
 259                return 0;
 260        if (PageDirty(page) || PageWriteback(page))
 261                return 0;
 262        if (page_mapped(page))
 263                return 0;
 264        return invalidate_complete_page(mapping, page);
 265}
 266
 267/**
 268 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 269 * @mapping: mapping to truncate
 270 * @lstart: offset from which to truncate
 271 * @lend: offset to which to truncate (inclusive)
 272 *
 273 * Truncate the page cache, removing the pages that are between
 274 * specified offsets (and zeroing out partial pages
 275 * if lstart or lend + 1 is not page aligned).
 276 *
 277 * Truncate takes two passes - the first pass is nonblocking.  It will not
 278 * block on page locks and it will not block on writeback.  The second pass
 279 * will wait.  This is to prevent as much IO as possible in the affected region.
 280 * The first pass will remove most pages, so the search cost of the second pass
 281 * is low.
 282 *
 283 * We pass down the cache-hot hint to the page freeing code.  Even if the
 284 * mapping is large, it is probably the case that the final pages are the most
 285 * recently touched, and freeing happens in ascending file offset order.
 286 *
  287 * Note that since ->invalidatepage() accepts a range to invalidate,
  288 * truncate_inode_pages_range() is able to handle cases where lend + 1 is
  289 * not page aligned.
 290 */
 291void truncate_inode_pages_range(struct address_space *mapping,
 292                                loff_t lstart, loff_t lend)
 293{
 294        pgoff_t         start;          /* inclusive */
 295        pgoff_t         end;            /* exclusive */
 296        unsigned int    partial_start;  /* inclusive */
 297        unsigned int    partial_end;    /* exclusive */
 298        struct pagevec  pvec;
 299        pgoff_t         indices[PAGEVEC_SIZE];
 300        pgoff_t         index;
 301        int             i;
 302
 303        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
 304                goto out;
 305
 306        /* Offsets within partial pages */
 307        partial_start = lstart & (PAGE_SIZE - 1);
 308        partial_end = (lend + 1) & (PAGE_SIZE - 1);
 309
 310        /*
  311         * 'start' and 'end' always cover the range of pages to be fully
 312         * truncated. Partial pages are covered with 'partial_start' at the
 313         * start of the range and 'partial_end' at the end of the range.
 314         * Note that 'end' is exclusive while 'lend' is inclusive.
 315         */
 316        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 317        if (lend == -1)
 318                /*
 319                 * lend == -1 indicates end-of-file so we have to set 'end'
 320                 * to the highest possible pgoff_t and since the type is
 321                 * unsigned we're using -1.
 322                 */
 323                end = -1;
 324        else
 325                end = (lend + 1) >> PAGE_SHIFT;
 326
 327        pagevec_init(&pvec);
 328        index = start;
 329        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 330                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
 331                        indices)) {
 332                /*
 333                 * Pagevec array has exceptional entries and we may also fail
 334                 * to lock some pages. So we store pages that can be deleted
 335                 * in a new pagevec.
 336                 */
 337                struct pagevec locked_pvec;
 338
 339                pagevec_init(&locked_pvec);
 340                for (i = 0; i < pagevec_count(&pvec); i++) {
 341                        struct page *page = pvec.pages[i];
 342
 343                        /* We rely upon deletion not changing page->index */
 344                        index = indices[i];
 345                        if (index >= end)
 346                                break;
 347
 348                        if (xa_is_value(page))
 349                                continue;
 350
 351                        if (!trylock_page(page))
 352                                continue;
 353                        WARN_ON(page_to_index(page) != index);
 354                        if (PageWriteback(page)) {
 355                                unlock_page(page);
 356                                continue;
 357                        }
 358                        if (page->mapping != mapping) {
 359                                unlock_page(page);
 360                                continue;
 361                        }
 362                        pagevec_add(&locked_pvec, page);
 363                }
 364                for (i = 0; i < pagevec_count(&locked_pvec); i++)
 365                        truncate_cleanup_page(mapping, locked_pvec.pages[i]);
 366                delete_from_page_cache_batch(mapping, &locked_pvec);
 367                for (i = 0; i < pagevec_count(&locked_pvec); i++)
 368                        unlock_page(locked_pvec.pages[i]);
 369                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
 370                pagevec_release(&pvec);
 371                cond_resched();
 372                index++;
 373        }
 374        if (partial_start) {
 375                struct page *page = find_lock_page(mapping, start - 1);
 376                if (page) {
 377                        unsigned int top = PAGE_SIZE;
 378                        if (start > end) {
 379                                /* Truncation within a single page */
 380                                top = partial_end;
 381                                partial_end = 0;
 382                        }
 383                        wait_on_page_writeback(page);
 384                        zero_user_segment(page, partial_start, top);
 385                        cleancache_invalidate_page(mapping, page);
 386                        if (page_has_private(page))
 387                                do_invalidatepage(page, partial_start,
 388                                                  top - partial_start);
 389                        unlock_page(page);
 390                        put_page(page);
 391                }
 392        }
 393        if (partial_end) {
 394                struct page *page = find_lock_page(mapping, end);
 395                if (page) {
 396                        wait_on_page_writeback(page);
 397                        zero_user_segment(page, 0, partial_end);
 398                        cleancache_invalidate_page(mapping, page);
 399                        if (page_has_private(page))
 400                                do_invalidatepage(page, 0,
 401                                                  partial_end);
 402                        unlock_page(page);
 403                        put_page(page);
 404                }
 405        }
 406        /*
  407         * If the truncation happened within a single page, no pages
 408         * will be released, just zeroed, so we can bail out now.
 409         */
 410        if (start >= end)
 411                goto out;
 412
 413        index = start;
 414        for ( ; ; ) {
 415                cond_resched();
 416                if (!pagevec_lookup_entries(&pvec, mapping, index,
 417                        min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
 418                        /* If all gone from start onwards, we're done */
 419                        if (index == start)
 420                                break;
 421                        /* Otherwise restart to make sure all gone */
 422                        index = start;
 423                        continue;
 424                }
 425                if (index == start && indices[0] >= end) {
 426                        /* All gone out of hole to be punched, we're done */
 427                        pagevec_remove_exceptionals(&pvec);
 428                        pagevec_release(&pvec);
 429                        break;
 430                }
 431
 432                for (i = 0; i < pagevec_count(&pvec); i++) {
 433                        struct page *page = pvec.pages[i];
 434
 435                        /* We rely upon deletion not changing page->index */
 436                        index = indices[i];
 437                        if (index >= end) {
 438                                /* Restart punch to make sure all gone */
 439                                index = start - 1;
 440                                break;
 441                        }
 442
 443                        if (xa_is_value(page))
 444                                continue;
 445
 446                        lock_page(page);
 447                        WARN_ON(page_to_index(page) != index);
 448                        wait_on_page_writeback(page);
 449                        truncate_inode_page(mapping, page);
 450                        unlock_page(page);
 451                }
 452                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
 453                pagevec_release(&pvec);
 454                index++;
 455        }
 456
 457out:
 458        cleancache_invalidate_inode(mapping);
 459}
 460EXPORT_SYMBOL(truncate_inode_pages_range);
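
/*
 * A worked example of the computations above, assuming 4096-byte pages and
 * an inode being truncated to 10000 bytes:
 *
 *	lstart = 10000, lend = -1 (end of file)
 *	partial_start = 10000 & 4095 = 1808
 *	partial_end   = (lend + 1) & 4095 = 0
 *	start = (10000 + 4095) >> 12 = 3
 *	end   = -1
 *
 * Pages with index >= 3 are removed outright; the page at index 2 (bytes
 * 8192..12287 of the file) is kept, but bytes 1808..4095 within it are
 * zeroed.  A typical call, as issued by truncate_pagecache() below:
 *
 *	truncate_inode_pages_range(mapping, 10000, (loff_t)-1);
 */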
 461
 462/**
 463 * truncate_inode_pages - truncate *all* the pages from an offset
 464 * @mapping: mapping to truncate
 465 * @lstart: offset from which to truncate
 466 *
 467 * Called under (and serialised by) inode->i_mutex.
 468 *
 469 * Note: When this function returns, there can be a page in the process of
 470 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 471 * mapping->nrpages can be non-zero when this function returns even after
 472 * truncation of the whole mapping.
 473 */
 474void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 475{
 476        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
 477}
 478EXPORT_SYMBOL(truncate_inode_pages);
 479
 480/**
 481 * truncate_inode_pages_final - truncate *all* pages before inode dies
 482 * @mapping: mapping to truncate
 483 *
 484 * Called under (and serialized by) inode->i_mutex.
 485 *
 486 * Filesystems have to use this in the .evict_inode path to inform the
 487 * VM that this is the final truncate and the inode is going away.
 488 */
 489void truncate_inode_pages_final(struct address_space *mapping)
 490{
 491        unsigned long nrexceptional;
 492        unsigned long nrpages;
 493
 494        /*
 495         * Page reclaim can not participate in regular inode lifetime
 496         * management (can't call iput()) and thus can race with the
 497         * inode teardown.  Tell it when the address space is exiting,
 498         * so that it does not install eviction information after the
 499         * final truncate has begun.
 500         */
 501        mapping_set_exiting(mapping);
 502
 503        /*
 504         * When reclaim installs eviction entries, it increases
 505         * nrexceptional first, then decreases nrpages.  Make sure we see
 506         * this in the right order or we might miss an entry.
 507         */
 508        nrpages = mapping->nrpages;
 509        smp_rmb();
 510        nrexceptional = mapping->nrexceptional;
 511
 512        if (nrpages || nrexceptional) {
 513                /*
 514                 * As truncation uses a lockless tree lookup, cycle
 515                 * the tree lock to make sure any ongoing tree
 516                 * modification that does not see AS_EXITING is
 517                 * completed before starting the final truncate.
 518                 */
 519                xa_lock_irq(&mapping->i_pages);
 520                xa_unlock_irq(&mapping->i_pages);
 521        }
 522
 523        /*
 524         * Cleancache needs notification even if there are no pages or shadow
 525         * entries.
 526         */
 527        truncate_inode_pages(mapping, 0);
 528}
 529EXPORT_SYMBOL(truncate_inode_pages_final);
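
/*
 * A minimal sketch of the ->evict_inode usage described above.  The function
 * name is hypothetical; the call order (final pagecache truncation, then
 * clearing the inode) mirrors what simple filesystems do.
 */
static void examplefs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	/* Filesystem-specific teardown of on-disk state would follow here. */
}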
 530
 531static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 532                pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 533{
 534        pgoff_t indices[PAGEVEC_SIZE];
 535        struct pagevec pvec;
 536        pgoff_t index = start;
 537        unsigned long ret;
 538        unsigned long count = 0;
 539        int i;
 540
 541        pagevec_init(&pvec);
 542        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 543                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
 544                        indices)) {
 545                for (i = 0; i < pagevec_count(&pvec); i++) {
 546                        struct page *page = pvec.pages[i];
 547
 548                        /* We rely upon deletion not changing page->index */
 549                        index = indices[i];
 550                        if (index > end)
 551                                break;
 552
 553                        if (xa_is_value(page)) {
 554                                invalidate_exceptional_entry(mapping, index,
 555                                                             page);
 556                                continue;
 557                        }
 558
 559                        if (!trylock_page(page))
 560                                continue;
 561
 562                        WARN_ON(page_to_index(page) != index);
 563
 564                        /* Middle of THP: skip */
 565                        if (PageTransTail(page)) {
 566                                unlock_page(page);
 567                                continue;
 568                        } else if (PageTransHuge(page)) {
 569                                index += HPAGE_PMD_NR - 1;
 570                                i += HPAGE_PMD_NR - 1;
 571                                /*
  572                                 * 'end' is in the middle of a THP. Don't
  573                                 * invalidate the page as the part outside of
  574                                 * 'end' could still be useful.
 575                                 */
 576                                if (index > end) {
 577                                        unlock_page(page);
 578                                        continue;
 579                                }
 580
 581                                /* Take a pin outside pagevec */
 582                                get_page(page);
 583
 584                                /*
 585                                 * Drop extra pins before trying to invalidate
 586                                 * the huge page.
 587                                 */
 588                                pagevec_remove_exceptionals(&pvec);
 589                                pagevec_release(&pvec);
 590                        }
 591
 592                        ret = invalidate_inode_page(page);
 593                        unlock_page(page);
 594                        /*
  595                         * Invalidation is a hint that the page is no longer
  596                         * of interest, so try to speed up its reclaim.
 597                         */
 598                        if (!ret) {
 599                                deactivate_file_page(page);
 600                                /* It is likely on the pagevec of a remote CPU */
 601                                if (nr_pagevec)
 602                                        (*nr_pagevec)++;
 603                        }
 604
 605                        if (PageTransHuge(page))
 606                                put_page(page);
 607                        count += ret;
 608                }
 609                pagevec_remove_exceptionals(&pvec);
 610                pagevec_release(&pvec);
 611                cond_resched();
 612                index++;
 613        }
 614        return count;
 615}
 616
 617/**
 618 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 619 * @mapping: the address_space which holds the pages to invalidate
 620 * @start: the offset 'from' which to invalidate
 621 * @end: the offset 'to' which to invalidate (inclusive)
 622 *
  623 * This function only removes the unlocked pages; if you want to
 624 * remove all the pages of one inode, you must call truncate_inode_pages.
 625 *
 626 * invalidate_mapping_pages() will not block on IO activity. It will not
 627 * invalidate pages which are dirty, locked, under writeback or mapped into
 628 * pagetables.
 629 *
 630 * Return: the number of the pages that were invalidated
 631 */
 632unsigned long invalidate_mapping_pages(struct address_space *mapping,
 633                pgoff_t start, pgoff_t end)
 634{
 635        return __invalidate_mapping_pages(mapping, start, end, NULL);
 636}
 637EXPORT_SYMBOL(invalidate_mapping_pages);
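
/*
 * A minimal sketch of a typical caller: drop whatever clean, unmapped
 * pagecache an inode has over a byte range by converting byte offsets to
 * page offsets first.  The helper name is hypothetical and @len is assumed
 * to be non-zero.
 */
static unsigned long examplefs_drop_clean_cache(struct inode *inode,
						loff_t pos, loff_t len)
{
	pgoff_t start = pos >> PAGE_SHIFT;
	pgoff_t end = (pos + len - 1) >> PAGE_SHIFT;

	return invalidate_mapping_pages(inode->i_mapping, start, end);
}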
 638
 639/**
  640 * invalidate_mapping_pagevec - like invalidate_mapping_pages(), but also
  641 * accounts for pages that are likely on a pagevec and counts them in
  642 * @nr_pagevec, which is then used by the caller.
 643 */
 644void invalidate_mapping_pagevec(struct address_space *mapping,
 645                pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 646{
 647        __invalidate_mapping_pages(mapping, start, end, nr_pagevec);
 648}
 649
 650/*
 651 * This is like invalidate_complete_page(), except it ignores the page's
 652 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 653 * invalidation guarantees, and cannot afford to leave pages behind because
 654 * shrink_page_list() has a temp ref on them, or because they're transiently
 655 * sitting in the lru_cache_add() pagevecs.
 656 */
 657static int
 658invalidate_complete_page2(struct address_space *mapping, struct page *page)
 659{
 660        unsigned long flags;
 661
 662        if (page->mapping != mapping)
 663                return 0;
 664
 665        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 666                return 0;
 667
 668        xa_lock_irqsave(&mapping->i_pages, flags);
 669        if (PageDirty(page))
 670                goto failed;
 671
 672        BUG_ON(page_has_private(page));
 673        __delete_from_page_cache(page, NULL);
 674        xa_unlock_irqrestore(&mapping->i_pages, flags);
 675
 676        if (mapping->a_ops->freepage)
 677                mapping->a_ops->freepage(page);
 678
 679        put_page(page); /* pagecache ref */
 680        return 1;
 681failed:
 682        xa_unlock_irqrestore(&mapping->i_pages, flags);
 683        return 0;
 684}
 685
 686static int do_launder_page(struct address_space *mapping, struct page *page)
 687{
 688        if (!PageDirty(page))
 689                return 0;
 690        if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
 691                return 0;
 692        return mapping->a_ops->launder_page(page);
 693}
 694
 695/**
 696 * invalidate_inode_pages2_range - remove range of pages from an address_space
 697 * @mapping: the address_space
 698 * @start: the page offset 'from' which to invalidate
 699 * @end: the page offset 'to' which to invalidate (inclusive)
 700 *
 701 * Any pages which are found to be mapped into pagetables are unmapped prior to
 702 * invalidation.
 703 *
 704 * Return: -EBUSY if any pages could not be invalidated.
 705 */
 706int invalidate_inode_pages2_range(struct address_space *mapping,
 707                                  pgoff_t start, pgoff_t end)
 708{
 709        pgoff_t indices[PAGEVEC_SIZE];
 710        struct pagevec pvec;
 711        pgoff_t index;
 712        int i;
 713        int ret = 0;
 714        int ret2 = 0;
 715        int did_range_unmap = 0;
 716
 717        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
 718                goto out;
 719
 720        pagevec_init(&pvec);
 721        index = start;
 722        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 723                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
 724                        indices)) {
 725                for (i = 0; i < pagevec_count(&pvec); i++) {
 726                        struct page *page = pvec.pages[i];
 727
 728                        /* We rely upon deletion not changing page->index */
 729                        index = indices[i];
 730                        if (index > end)
 731                                break;
 732
 733                        if (xa_is_value(page)) {
 734                                if (!invalidate_exceptional_entry2(mapping,
 735                                                                   index, page))
 736                                        ret = -EBUSY;
 737                                continue;
 738                        }
 739
 740                        lock_page(page);
 741                        WARN_ON(page_to_index(page) != index);
 742                        if (page->mapping != mapping) {
 743                                unlock_page(page);
 744                                continue;
 745                        }
 746                        wait_on_page_writeback(page);
 747                        if (page_mapped(page)) {
 748                                if (!did_range_unmap) {
 749                                        /*
 750                                         * Zap the rest of the file in one hit.
 751                                         */
 752                                        unmap_mapping_pages(mapping, index,
 753                                                (1 + end - index), false);
 754                                        did_range_unmap = 1;
 755                                } else {
 756                                        /*
 757                                         * Just zap this page
 758                                         */
 759                                        unmap_mapping_pages(mapping, index,
 760                                                                1, false);
 761                                }
 762                        }
 763                        BUG_ON(page_mapped(page));
 764                        ret2 = do_launder_page(mapping, page);
 765                        if (ret2 == 0) {
 766                                if (!invalidate_complete_page2(mapping, page))
 767                                        ret2 = -EBUSY;
 768                        }
 769                        if (ret2 < 0)
 770                                ret = ret2;
 771                        unlock_page(page);
 772                }
 773                pagevec_remove_exceptionals(&pvec);
 774                pagevec_release(&pvec);
 775                cond_resched();
 776                index++;
 777        }
 778        /*
  779         * For DAX we invalidate page tables after invalidating page cache.  We
  780         * could invalidate page tables while invalidating each entry, but that
  781         * would be expensive.  And doing range unmapping beforehand doesn't work
  782         * either, as we have no cheap way to tell whether a page cache entry got
  783         * remapped later.
 784         */
 785        if (dax_mapping(mapping)) {
 786                unmap_mapping_pages(mapping, start, end - start + 1, false);
 787        }
 788out:
 789        cleancache_invalidate_inode(mapping);
 790        return ret;
 791}
 792EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
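
/*
 * A minimal sketch modelled on what direct-I/O write paths do: after writing
 * [pos, pos + count) to disk, force the cached pages for that range out so
 * that subsequent buffered reads see the new data.  The helper name is
 * hypothetical; -EBUSY means some page could not be invalidated.
 */
static int examplefs_invalidate_after_dio(struct address_space *mapping,
					  loff_t pos, size_t count)
{
	if (!count)
		return 0;

	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					     (pos + count - 1) >> PAGE_SHIFT);
}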
 793
 794/**
 795 * invalidate_inode_pages2 - remove all pages from an address_space
 796 * @mapping: the address_space
 797 *
 798 * Any pages which are found to be mapped into pagetables are unmapped prior to
 799 * invalidation.
 800 *
 801 * Return: -EBUSY if any pages could not be invalidated.
 802 */
 803int invalidate_inode_pages2(struct address_space *mapping)
 804{
 805        return invalidate_inode_pages2_range(mapping, 0, -1);
 806}
 807EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
 808
 809/**
 810 * truncate_pagecache - unmap and remove pagecache that has been truncated
 811 * @inode: inode
 812 * @newsize: new file size
 813 *
  814 * The inode's new i_size must already be written before truncate_pagecache
 815 * is called.
 816 *
 817 * This function should typically be called before the filesystem
 818 * releases resources associated with the freed range (eg. deallocates
 819 * blocks). This way, pagecache will always stay logically coherent
 820 * with on-disk format, and the filesystem would not have to deal with
 821 * situations such as writepage being called for a page that has already
 822 * had its underlying blocks deallocated.
 823 */
 824void truncate_pagecache(struct inode *inode, loff_t newsize)
 825{
 826        struct address_space *mapping = inode->i_mapping;
 827        loff_t holebegin = round_up(newsize, PAGE_SIZE);
 828
 829        /*
 830         * unmap_mapping_range is called twice, first simply for
 831         * efficiency so that truncate_inode_pages does fewer
 832         * single-page unmaps.  However after this first call, and
 833         * before truncate_inode_pages finishes, it is possible for
 834         * private pages to be COWed, which remain after
 835         * truncate_inode_pages finishes, hence the second
 836         * unmap_mapping_range call must be made for correctness.
 837         */
 838        unmap_mapping_range(mapping, holebegin, 0, 1);
 839        truncate_inode_pages(mapping, newsize);
 840        unmap_mapping_range(mapping, holebegin, 0, 1);
 841}
 842EXPORT_SYMBOL(truncate_pagecache);
 843
 844/**
 845 * truncate_setsize - update inode and pagecache for a new file size
 846 * @inode: inode
 847 * @newsize: new file size
 848 *
 849 * truncate_setsize updates i_size and performs pagecache truncation (if
  850 * necessary) to @newsize. It will typically be called from the filesystem's
 851 * setattr function when ATTR_SIZE is passed in.
 852 *
 853 * Must be called with a lock serializing truncates and writes (generally
 854 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 855 * specific block truncation has been performed.
 856 */
 857void truncate_setsize(struct inode *inode, loff_t newsize)
 858{
 859        loff_t oldsize = inode->i_size;
 860
 861        i_size_write(inode, newsize);
 862        if (newsize > oldsize)
 863                pagecache_isize_extended(inode, oldsize, newsize);
 864        truncate_pagecache(inode, newsize);
 865}
 866EXPORT_SYMBOL(truncate_setsize);
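
/*
 * A minimal sketch of the ->setattr usage mentioned above, with error
 * handling and on-disk block truncation elided.  The names are hypothetical
 * and the prototypes match the era of this file (no user namespace argument).
 */
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}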
 867
 868/**
 869 * pagecache_isize_extended - update pagecache after extension of i_size
 870 * @inode:      inode for which i_size was extended
 871 * @from:       original inode size
 872 * @to:         new inode size
 873 *
 874 * Handle extension of inode size either caused by extending truncate or by
 875 * write starting after current i_size. We mark the page straddling current
 876 * i_size RO so that page_mkwrite() is called on the nearest write access to
  877 * the page.  This way the filesystem can be sure that page_mkwrite() is
  878 * called on the page before user writes to the page via mmap after the
  879 * i_size has been changed.
  880 *
  881 * The function must be called after i_size is updated so that a page fault
  882 * coming after we unlock the page will already see the new i_size.
  883 * The function must be called while we still hold i_mutex - this not only
  884 * makes sure i_size is stable but also ensures that userspace cannot observe
  885 * the new i_size before we are prepared to store mmap writes at the new size.
 886 */
 887void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 888{
 889        int bsize = i_blocksize(inode);
 890        loff_t rounded_from;
 891        struct page *page;
 892        pgoff_t index;
 893
 894        WARN_ON(to > inode->i_size);
 895
 896        if (from >= to || bsize == PAGE_SIZE)
 897                return;
  898        /* Will the page straddling @from have any hole blocks created in it? */
 899        rounded_from = round_up(from, bsize);
 900        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
 901                return;
 902
 903        index = from >> PAGE_SHIFT;
 904        page = find_lock_page(inode->i_mapping, index);
 905        /* Page not cached? Nothing to do */
 906        if (!page)
 907                return;
 908        /*
 909         * See clear_page_dirty_for_io() for details why set_page_dirty()
 910         * is needed.
 911         */
 912        if (page_mkclean(page))
 913                set_page_dirty(page);
 914        unlock_page(page);
 915        put_page(page);
 916}
 917EXPORT_SYMBOL(pagecache_isize_extended);
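
/*
 * A worked example of the checks above, assuming 4096-byte pages and a
 * 1024-byte block size:
 *
 *	from = 2500, to = 5000
 *	bsize (1024) != PAGE_SIZE			-> keep going
 *	rounded_from = round_up(2500, 1024) = 3072
 *	to (5000) > rounded_from (3072)			-> keep going
 *	rounded_from & (PAGE_SIZE - 1) = 3072 != 0	-> keep going
 *	index = 2500 >> PAGE_SHIFT = 0
 *
 * So the page at index 0, which straddles the old i_size, is write-protected
 * via page_mkclean() (and redirtied if page_mkclean() found it dirty),
 * forcing ->page_mkwrite() on the next mmap store to it.
 */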
 918
 919/**
 920 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 921 * @inode: inode
 922 * @lstart: offset of beginning of hole
 923 * @lend: offset of last byte of hole
 924 *
 925 * This function should typically be called before the filesystem
 926 * releases resources associated with the freed range (eg. deallocates
 927 * blocks). This way, pagecache will always stay logically coherent
 928 * with on-disk format, and the filesystem would not have to deal with
 929 * situations such as writepage being called for a page that has already
 930 * had its underlying blocks deallocated.
 931 */
 932void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
 933{
 934        struct address_space *mapping = inode->i_mapping;
 935        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
 936        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
 937        /*
 938         * This rounding is currently just for example: unmap_mapping_range
 939         * expands its hole outwards, whereas we want it to contract the hole
 940         * inwards.  However, existing callers of truncate_pagecache_range are
 941         * doing their own page rounding first.  Note that unmap_mapping_range
 942         * allows holelen 0 for all, and we allow lend -1 for end of file.
 943         */
 944
 945        /*
 946         * Unlike in truncate_pagecache, unmap_mapping_range is called only
 947         * once (before truncating pagecache), and without "even_cows" flag:
 948         * hole-punching should not remove private COWed pages from the hole.
 949         */
 950        if ((u64)unmap_end > (u64)unmap_start)
 951                unmap_mapping_range(mapping, unmap_start,
 952                                    1 + unmap_end - unmap_start, 0);
 953        truncate_inode_pages_range(mapping, lstart, lend);
 954}
 955EXPORT_SYMBOL(truncate_pagecache_range);
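
/*
 * A minimal sketch of a hole-punch caller, loosely modelled on what
 * filesystems do for fallocate(FALLOC_FL_PUNCH_HOLE): drop the pagecache
 * over the hole before freeing the underlying blocks.  The function name is
 * hypothetical and all locking is omitted.
 */
static int examplefs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t end = offset + len - 1;

	truncate_pagecache_range(inode, offset, end);
	/* Filesystem-specific deallocation of blocks in [offset, end] here. */
	return 0;
}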
 956