linux/include/linux/pagemap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
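
/*
 * Illustrative sketch only, not a real kernel helper: how a filesystem's
 * writeback completion path would typically record a failure.  The helper
 * name and its signature are hypothetical; real callers live in the
 * filesystem's writepage/end_io paths.
 */
static inline void example_record_writeback_error(struct page *page, int error)
{
	if (unlikely(error)) {
		/* keep the legacy PG_error hint and the mapping-wide record */
		SetPageError(page);
		mapping_set_error(page->mapping, error);
	}
}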

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
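
/*
 * Illustrative sketch only, not a real kernel helper: a filesystem that must
 * avoid recursing into filesystem reclaim would constrain its allocation mask
 * like this before allocating a page cache page.  The helper name is
 * hypothetical.
 */
static inline gfp_t example_nofs_gfp(struct address_space *mapping)
{
	/* honour the mapping's mask, but never allow __GFP_FS */
	return mapping_gfp_constraint(mapping, GFP_NOFS);
}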

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (e.g. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
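
/*
 * Illustrative sketch only, not a real kernel helper: the lookup-side pattern
 * described in the comment above, written against the mapping's XArray.  This
 * mirrors what find_get_page() does internally; the function name is
 * hypothetical and shadow/exceptional entries are simply treated as
 * "not found".
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = xa_load(&mapping->i_pages, offset);	/* 1. find the page */
	if (!page || xa_is_value(page)) {
		page = NULL;
		goto out;
	}
	if (!page_cache_get_speculative(page))		/* 2. try to pin it */
		goto repeat;
	/* 3. recheck: was it truncated or replaced while unpinned? */
	if (unlikely(page != xa_load(&mapping->i_pages, offset))) {
		put_page(page);
		goto repeat;
	}
out:
	rcu_read_unlock();
	return page;
}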

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
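
/*
 * Illustrative sketch only, not a real kernel helper: the usual caller
 * pattern.  The helper name is hypothetical; the point is that a page
 * returned by find_get_page() carries a reference which the caller must drop
 * with put_page().
 */
static inline bool example_page_is_cached_uptodate(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);
	bool uptodate = false;

	if (page) {
		uptodate = PageUptodate(page);
		put_page(page);
	}
	return uptodate;
}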

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

static inline struct page *find_subpage(struct page *page, pgoff_t offset)
{
	if (PageHuge(page))
		return page;

	VM_BUG_ON_PAGE(PageTail(page), page);

	return page + (offset & (compound_nr(page) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
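
/*
 * Illustrative sketch only, not a real kernel helper: visiting every dirty
 * page in a range the way a simple writeback loop would.  The helper name and
 * the fixed-size batch are hypothetical; real writeback code batches through
 * a pagevec instead.
 */
static inline void example_visit_dirty_pages(struct address_space *mapping,
					     pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	pgoff_t index = start;
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					      PAGECACHE_TAG_DIRTY,
					      ARRAY_SIZE(pages), pages))) {
		for (i = 0; i < nr; i++) {
			/* each page came back with a reference held */
			put_page(pages[i]);
		}
	}
}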

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
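
/*
 * Illustrative sketch only, not a real kernel helper: reading one page
 * through the mapping's default filler (->readpage) and peeking at its
 * contents.  The helper name is hypothetical; note that read_mapping_page()
 * returns an ERR_PTR() on failure, not NULL.
 */
static inline int example_read_first_byte(struct address_space *mapping,
					  pgoff_t index, char *out)
{
	struct page *page = read_mapping_page(mapping, index, NULL);
	char *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);

	kaddr = kmap(page);
	*out = kaddr[0];
	kunmap(page);
	put_page(page);
	return 0;
}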

/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 *  We don't initialize ->index for tail pages: calculate based on
	 *  head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
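
/*
 * Worked example (illustrative, assuming 4KiB pages, i.e. PAGE_SHIFT == 12):
 * for a VMA with vm_start = 0x7f0000000000 and vm_pgoff = 16, a fault at
 * vm_start + 0x3000 maps to file page index (0x3000 >> 12) + 16 = 19.
 */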

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked.
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
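
/*
 * Illustrative sketch only, not a real kernel helper: typical use of
 * lock_page_killable() in a path that must back out cleanly if the task
 * receives a fatal signal.  The helper name is hypothetical.
 */
static inline int example_lock_and_check_uptodate(struct page *page)
{
	int err = lock_page_killable(page);

	if (err)
		return err;		/* -EINTR: caller unwinds */
	if (!PageUptodate(page)) {
		unlock_page(page);
		return -EIO;
	}
	unlock_page(page);
	return 0;
}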

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all of the pages in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
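
/*
 * Illustrative sketch only, not a real kernel helper: the "fault in, then
 * copy" pattern used by write paths that copy from userspace while also
 * touching page cache pages.  The helper name is hypothetical and the retry
 * policy is simplified; real callers such as generic_perform_write() work
 * through an iov_iter and disable pagefaults around the actual copy.
 */
static inline int example_prefault_and_copy(struct page *page,
					    const char __user *buf,
					    unsigned int bytes)
{
	void *kaddr;
	unsigned long left;

	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	kaddr = kmap(page);
	left = copy_from_user(kaddr, buf, bytes);
	kunmap(page);

	return left ? -EFAULT : 0;
}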

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
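
/*
 * Illustrative sketch only, not a real kernel helper: allocating a fresh page
 * and inserting it, the way a simple read path would before issuing I/O.  The
 * helper name is hypothetical; add_to_page_cache_lru() is used so the new
 * page also goes onto the LRU lists, and it returns the page locked on
 * success.
 */
static inline struct page *example_alloc_and_insert(struct address_space *mapping,
						    pgoff_t offset, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);

	if (!page)
		return NULL;

	if (add_to_page_cache_lru(page, mapping, offset, gfp)) {
		put_page(page);
		return NULL;
	}
	/* page is locked, referenced and in the cache; caller reads it in */
	return page;
}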

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */