linux/mm/truncate.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entries(struct address_space *mapping,
                                 unsigned long start, unsigned long max)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct folio *folio;

        /* Handled by shmem itself, or for DAX we do nothing. */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return;

        xas_set_update(&xas, workingset_update_node);

        spin_lock(&mapping->host->i_lock);
        xas_lock_irq(&xas);

        /* Clear all shadow entries from start to max */
        xas_for_each(&xas, folio, max) {
                if (xa_is_value(folio))
                        xas_store(&xas, NULL);
        }

        xas_unlock_irq(&xas);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);
}
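
/*
 * Illustrative sketch (editor's addition, not part of this file): shadow
 * entries are XArray "value" entries -- tagged integers encoding workingset
 * eviction information rather than folio pointers.  Assuming a plain lookup,
 * distinguishing one from a real folio looks roughly like this:
 */
#if 0
void *entry = xa_load(&mapping->i_pages, index);

if (xa_is_value(entry)) {
        /* Shadow entry: a tagged integer, not a folio pointer. */
        unsigned long cookie = xa_to_value(entry);
        /* 'cookie' encodes workingset eviction information. */
}
#endif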

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 * Please note that indices[] has entries in ascending order as guaranteed by
 * either find_get_entries() or find_lock_entries().
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
                                struct folio_batch *fbatch, pgoff_t *indices)
{
        XA_STATE(xas, &mapping->i_pages, indices[0]);
        int nr = folio_batch_count(fbatch);
        struct folio *folio;
        int i, j;

        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;

        for (j = 0; j < nr; j++)
                if (xa_is_value(fbatch->folios[j]))
                        break;

        if (j == nr)
                return;

        if (dax_mapping(mapping)) {
                for (i = j; i < nr; i++) {
                        if (xa_is_value(fbatch->folios[i])) {
                                /*
                                 * File systems should already have called
                                 * dax_break_layout_entry() to remove all DAX
                                 * entries while holding a lock to prevent
                                 * establishing new entries. Therefore we
                                 * shouldn't find any here.
                                 */
                                WARN_ON_ONCE(1);

                                /*
                                 * Delete the mapping so truncate_pagecache()
                                 * doesn't loop forever.
                                 */
                                dax_delete_mapping_entry(mapping, indices[i]);
                        }
                }
                goto out;
        }

        xas_set(&xas, indices[j]);
        xas_set_update(&xas, workingset_update_node);

        spin_lock(&mapping->host->i_lock);
        xas_lock_irq(&xas);

        xas_for_each(&xas, folio, indices[nr-1]) {
                if (xa_is_value(folio))
                        xas_store(&xas, NULL);
        }

        xas_unlock_irq(&xas);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);
out:
        folio_batch_remove_exceptionals(fbatch);
}

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
        const struct address_space_operations *aops = folio->mapping->a_ops;

        if (aops->invalidate_folio)
                aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);
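
/*
 * Example wiring (editor's sketch, not a definition from this file): a
 * block-based filesystem typically points ->invalidate_folio at
 * block_invalidate_folio(), which drops buffer_head state for the
 * invalidated range.  "example_aops" is a made-up name:
 */
#if 0
static const struct address_space_operations example_aops = {
        .invalidate_folio       = block_invalidate_folio,
        /* ... other operations ... */
};
#endif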

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
        if (folio_mapped(folio))
                unmap_mapping_folio(folio);

        if (folio_needs_release(folio))
                folio_invalidate(folio, 0, folio_size(folio));

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (eg ext3 journaling).
         * Hence dirty accounting check is placed after invalidation.
         */
        folio_cancel_dirty(folio);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
        if (folio->mapping != mapping)
                return -EIO;

        truncate_cleanup_folio(folio);
        filemap_remove_folio(folio);
        return 0;
}

/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  The folio split will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
        loff_t pos = folio_pos(folio);
        size_t size = folio_size(folio);
        unsigned int offset, length;
        struct page *split_at, *split_at2;

        if (pos < start)
                offset = start - pos;
        else
                offset = 0;
        if (pos + size <= (u64)end)
                length = size - offset;
        else
                length = end + 1 - pos - offset;

        folio_wait_writeback(folio);
        if (length == size) {
                truncate_inode_folio(folio->mapping, folio);
                return true;
        }

        /*
         * We may be zeroing pages we're about to discard, but it avoids
         * doing a complex calculation here, and then doing the zeroing
         * anyway if the page split fails.
         */
        if (!mapping_inaccessible(folio->mapping))
                folio_zero_range(folio, offset, length);

        if (folio_needs_release(folio))
                folio_invalidate(folio, offset, length);
        if (!folio_test_large(folio))
                return true;

        split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
        if (!try_folio_split(folio, split_at, NULL)) {
                /*
                 * try to split at offset + length to make sure folios within
                 * the range can be dropped, especially to avoid memory waste
                 * for shmem truncate
                 */
                struct folio *folio2;

                if (offset + length == size)
                        goto no_split;

                split_at2 = folio_page(folio,
                                PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
                folio2 = page_folio(split_at2);

                if (!folio_try_get(folio2))
                        goto no_split;

                if (!folio_test_large(folio2))
                        goto out;

                if (!folio_trylock(folio2))
                        goto out;

                /*
                 * make sure folio2 is large and does not change its mapping.
                 * Its split result does not matter here.
                 */
                if (folio_test_large(folio2) &&
                    folio2->mapping == folio->mapping)
                        try_folio_split(folio2, split_at2, NULL);

                folio_unlock(folio2);
out:
                folio_put(folio2);
no_split:
                return true;
        }
        if (folio_test_dirty(folio))
                return false;
        truncate_inode_folio(folio->mapping, folio);
        return true;
}
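
/*
 * Worked example (editor's addition, assuming 4KiB pages): truncating a
 * file to size 5000 with a 16-page (64KiB) folio at pos 0 gives
 * offset = 5000 and, with end == -1, length = 65536 - 5000.  The tail is
 * zeroed, then the folio is split at page PAGE_ALIGN_DOWN(5000) / PAGE_SIZE
 * == 1, so pages wholly beyond the new EOF (pages 2-15) can be freed,
 * while page 0 and page 1 (the latter straddling the new EOF, with its
 * tail zeroed) are kept.
 */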

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_folio(struct address_space *mapping,
                struct folio *folio)
{
        if (!mapping)
                return -EINVAL;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
        return truncate_inode_folio(mapping, folio);
}
EXPORT_SYMBOL(generic_error_remove_folio);
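
/*
 * Example wiring (editor's sketch): memory-failure handling reaches this
 * helper through the mapping's ->error_remove_folio operation, which many
 * regular filesystems set directly to it.  "example_aops" is a made-up name:
 */
#if 0
static const struct address_space_operations example_aops = {
        .error_remove_folio     = generic_error_remove_folio,
        /* ... */
};
#endif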

/**
 * mapping_evict_folio() - Remove an unused folio from the page-cache.
 * @mapping: The mapping this folio belongs to.
 * @folio: The folio to remove.
 *
 * Safely remove one folio from the page cache.
 * It only drops clean, unused folios.
 *
 * Context: Folio must be locked.
 * Return: The number of pages successfully removed.
 */
long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
        /* The page may have been truncated before it was locked */
        if (!mapping)
                return 0;
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
                return 0;
        /* The refcount will be elevated if any page in the folio is mapped */
        if (folio_ref_count(folio) >
                        folio_nr_pages(folio) + folio_has_private(folio) + 1)
                return 0;
        if (!filemap_release_folio(folio, 0))
                return 0;

        return remove_mapping(mapping, folio);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts a range to invalidate,
 * truncate_inode_pages_range() is able to handle cases where lend + 1 is
 * not properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        pgoff_t         start;          /* inclusive */
        pgoff_t         end;            /* exclusive */
        struct folio_batch fbatch;
        pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;
        struct folio    *folio;
        bool            same_folio;

        if (mapping_empty(mapping))
                return;

        /*
         * 'start' and 'end' always cover the range of pages to be fully
         * truncated. Partial folios at either end of the range are handled
         * separately by truncate_inode_partial_folio() below.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
                 * to the highest possible pgoff_t and since the type is
                 * unsigned we're using -1.
                 */
                end = -1;
        else
                end = (lend + 1) >> PAGE_SHIFT;

        folio_batch_init(&fbatch);
        index = start;
        while (index < end && find_lock_entries(mapping, &index, end - 1,
                        &fbatch, indices)) {
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        truncate_cleanup_folio(fbatch.folios[i]);
                delete_from_page_cache_batch(mapping, &fbatch);
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_unlock(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }

        same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
        folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
        if (!IS_ERR(folio)) {
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
                        start = folio_next_index(folio);
                        if (same_folio)
                                end = folio->index;
                }
                folio_unlock(folio);
                folio_put(folio);
                folio = NULL;
        }

        if (!same_folio) {
                folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
                                                FGP_LOCK, 0);
                if (!IS_ERR(folio)) {
                        if (!truncate_inode_partial_folio(folio, lstart, lend))
                                end = folio->index;
                        folio_unlock(folio);
                        folio_put(folio);
                }
        }

        index = start;
        while (index < end) {
                cond_resched();
                if (!find_get_entries(mapping, &index, end - 1, &fbatch,
                                indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
                        /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio))
                                continue;

                        folio_lock(folio);
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
                        truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
                }
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                folio_batch_release(&fbatch);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
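
/*
 * Worked example (editor's addition, assuming 4KiB pages): calling
 * truncate_inode_pages_range() with lstart == 0 and lend == 8191 removes
 * pages 0 and 1 exactly: start = 0, end = (8191 + 1) >> PAGE_SHIFT = 2
 * (exclusive).  Passing lend == -1 means "to end of file", which is what
 * truncate_inode_pages() below does.
 */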

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
        /*
         * Page reclaim cannot participate in regular inode lifetime
         * management (it can't call iput()) and thus can race with the
         * inode teardown.  Tell it when the address space is exiting,
         * so that it does not install eviction information after the
         * final truncate has begun.
         */
        mapping_set_exiting(mapping);

        if (!mapping_empty(mapping)) {
                /*
                 * As truncation uses a lockless tree lookup, cycle
                 * the tree lock to make sure any ongoing tree
                 * modification that does not see AS_EXITING is
                 * completed before starting the final truncate.
                 */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);
        }

        truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
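
/*
 * Usage sketch (editor's addition): the canonical call site is a
 * filesystem's ->evict_inode, roughly along these lines.  Filesystem
 * specific teardown is omitted and "example_evict_inode" is a made-up name:
 */
#if 0
static void example_evict_inode(struct inode *inode)
{
        /* Drop all pagecache and tell reclaim the mapping is exiting. */
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}
#endif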

/**
 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
 * @mapping: the address_space which holds the folios to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_failed: How many folio invalidations failed
 *
 * This function is similar to invalidate_mapping_pages(), except that it
 * returns the number of folios which could not be evicted in @nr_failed.
 */
unsigned long mapping_try_invalidate(struct address_space *mapping,
                pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct folio_batch fbatch;
        pgoff_t index = start;
        unsigned long ret;
        unsigned long count = 0;
        int i;

        folio_batch_init(&fbatch);
        while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
                bool xa_has_values = false;
                int nr = folio_batch_count(&fbatch);

                for (i = 0; i < nr; i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio)) {
                                xa_has_values = true;
                                count++;
                                continue;
                        }

                        ret = mapping_evict_folio(mapping, folio);
                        folio_unlock(folio);
                        /*
                         * Invalidation is a hint that the folio is no longer
                         * of interest, so try to speed up its reclaim.
                         */
                        if (!ret) {
                                deactivate_file_folio(folio);
                                /* Likely in the lru cache of a remote CPU */
                                if (nr_failed)
                                        (*nr_failed)++;
                        }
                        count += ret;
                }

                if (xa_has_values)
                        clear_shadow_entries(mapping, indices[0], indices[nr-1]);

                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }
        return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: The number of indices that had their contents invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
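
/*
 * Usage sketch (editor's addition): a POSIX_FADV_DONTNEED-style hint boils
 * down to a call like the following, with page indices derived from a byte
 * range.  'offset' and 'len' here are example names, not kernel code:
 */
#if 0
invalidate_mapping_pages(inode->i_mapping,
                         offset >> PAGE_SHIFT,
                         (offset + len - 1) >> PAGE_SHIFT);
#endif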

static int folio_launder(struct address_space *mapping, struct folio *folio)
{
        if (!folio_test_dirty(folio))
                return 0;
        if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
                return 0;
        return mapping->a_ops->launder_folio(folio);
}

/*
 * This is like mapping_evict_folio(), except it ignores the folio's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave folios behind because
 * shrink_folio_list() has a temp ref on them, or because they're transiently
 * sitting in the folio_add_lru() caches.
 */
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
                           gfp_t gfp)
{
        int ret;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        if (folio_mapped(folio))
                unmap_mapping_folio(folio);
        BUG_ON(folio_mapped(folio));

        ret = folio_launder(mapping, folio);
        if (ret)
                return ret;
        if (folio->mapping != mapping)
                return -EBUSY;
        if (!filemap_release_folio(folio, gfp))
                return -EBUSY;

        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        if (folio_test_dirty(folio))
                goto failed;

        BUG_ON(folio_has_private(folio));
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        filemap_free_folio(mapping, folio);
        return 1;
failed:
        xa_unlock_irq(&mapping->i_pages);
        spin_unlock(&mapping->host->i_lock);
        return -EBUSY;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct folio_batch fbatch;
        pgoff_t index;
        int i;
        int ret = 0;
        int ret2 = 0;
        int did_range_unmap = 0;

        if (mapping_empty(mapping))
                return 0;

        folio_batch_init(&fbatch);
        index = start;
        while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
                bool xa_has_values = false;
                int nr = folio_batch_count(&fbatch);

                for (i = 0; i < nr; i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio)) {
                                xa_has_values = true;
                                if (dax_mapping(mapping) &&
                                    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
                                        ret = -EBUSY;
                                continue;
                        }

                        if (!did_range_unmap && folio_mapped(folio)) {
                                /*
                                 * If folio is mapped, before taking its lock,
                                 * zap the rest of the file in one hit.
                                 */
                                unmap_mapping_pages(mapping, indices[i],
                                                (1 + end - indices[i]), false);
                                did_range_unmap = 1;
                        }

                        folio_lock(folio);
                        if (unlikely(folio->mapping != mapping)) {
                                folio_unlock(folio);
                                continue;
                        }
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
                        ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
                        if (ret2 < 0)
                                ret = ret2;
                        folio_unlock(folio);
                }

                if (xa_has_values)
                        clear_shadow_entries(mapping, indices[0], indices[nr-1]);

                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }
        /*
         * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry, but that
         * would be expensive. Doing the range unmapping first doesn't work
         * either, as we have no cheap way to tell whether a page cache entry
         * got remapped later.
         */
        if (dax_mapping(mapping)) {
                unmap_mapping_pages(mapping, start, end - start + 1, false);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
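
/*
 * Usage sketch (editor's addition): direct I/O writes invalidate the cached
 * range they bypassed so later buffered reads don't see stale data.  A
 * simplified fragment, with 'pos' and 'len' in bytes as example names:
 */
#if 0
err = invalidate_inode_pages2_range(mapping,
                pos >> PAGE_SHIFT, (pos + len - 1) >> PAGE_SHIFT);
/* err == -EBUSY means some folio could not be invalidated. */
#endif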

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /*
         * unmap_mapping_range is called twice, first simply for
         * efficiency so that truncate_inode_pages does fewer
         * single-page unmaps.  However after this first call, and
         * before truncate_inode_pages finishes, it is possible for
         * private pages to be COWed, which remain after
         * truncate_inode_pages finishes, hence the second
         * unmap_mapping_range call must be made for correctness.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        if (newsize > oldsize)
                pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
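
/*
 * Usage sketch (editor's addition): a minimal ->setattr fragment, assuming
 * the usual i_rwsem locking done by the VFS.  setattr_prepare() and error
 * handling are omitted and "example_setattr" is a made-up name:
 */
#if 0
static int example_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                           struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);

        if (attr->ia_valid & ATTR_SIZE)
                truncate_setsize(inode, attr->ia_size);
        setattr_copy(idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
#endif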

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:      inode for which i_size was extended
 * @from:       original inode size
 * @to:         new inode size
 *
 * Handle extension of inode size either caused by extending truncate or
 * by write starting after current i_size.  We mark the page straddling
 * current i_size RO so that page_mkwrite() is called on the first
 * write access to the page.  This way, the filesystem can update its
 * per-block information before userspace writes to the page via mmap
 * after the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the folio will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe the
 * new i_size value before we are prepared to store mmap writes at the new
 * inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
        int bsize = i_blocksize(inode);
        loff_t rounded_from;
        struct folio *folio;

        WARN_ON(to > inode->i_size);

        if (from >= to || bsize >= PAGE_SIZE)
                return;
        /* Will the page straddling @from have any hole blocks created? */
        rounded_from = round_up(from, bsize);
        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;

        folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
        /* Folio not cached? Nothing to do */
        if (IS_ERR(folio))
                return;
        /*
         * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
         * is needed.
         */
        if (folio_mkclean(folio))
                folio_mark_dirty(folio);

        /*
         * The post-eof range of the folio must be zeroed before it is exposed
         * to the file. Writeback normally does this, but since i_size has been
         * increased we handle it here.
         */
        if (folio_test_dirty(folio)) {
                unsigned int offset, end;

                offset = from - folio_pos(folio);
                end = min_t(unsigned int, to - folio_pos(folio),
                            folio_size(folio));
                folio_zero_segment(folio, offset, end);
        }

        folio_unlock(folio);
        folio_put(folio);
}
EXPORT_SYMBOL(pagecache_isize_extended);
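
/*
 * Worked example (editor's addition): with a 1KiB block size and 4KiB
 * pages, extending i_size from 2600 to 5000 gives rounded_from =
 * round_up(2600, 1024) = 3072.  Since 5000 > 3072 and 3072 is not page
 * aligned, the byte range 3072-4095 of the page straddling the old EOF may
 * still be hole blocks, so that page is write-protected here to force
 * ->page_mkwrite() before the first mmap store.
 */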

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
        /*
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
         * doing their own page rounding first.  Note that unmap_mapping_range
         * allows holelen 0 for all, and we allow lend -1 for end of file.
         */

        /*
         * Unlike in truncate_pagecache, unmap_mapping_range is called only
         * once (before truncating pagecache), and without "even_cows" flag:
         * hole-punching should not remove private COWed pages from the hole.
         */
        if ((u64)unmap_end > (u64)unmap_start)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
        truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
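
/*
 * Usage sketch (editor's addition): a fallocate(FALLOC_FL_PUNCH_HOLE)
 * implementation typically removes the cached range before freeing the
 * underlying blocks.  'offset' and 'len' (in bytes) are example names:
 */
#if 0
truncate_pagecache_range(inode, offset, offset + len - 1);
/* ... then deallocate blocks [offset, offset + len) on disk ... */
#endif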