linux/include/linux/pagemap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
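
/*
 * Example (editor's sketch, not part of the kernel API): a filesystem's
 * writeback completion path would typically record a failure with
 * mapping_set_error() so that a later fsync(2) on any file open against
 * this inode reports it.  The helper name example_end_writeback() is
 * hypothetical.
 *
 *        static void example_end_writeback(struct page *page, int err)
 *        {
 *                if (err)
 *                        mapping_set_error(page->mapping, err);
 *                end_page_writeback(page);
 *        }
 */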

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (e.g. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
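
/*
 * Example (editor's sketch): the lookup side of the protocol described above,
 * loosely modeled on find_get_entry() in mm/filemap.c.  xas_load() is step 1,
 * page_cache_get_speculative() is step 2 and xas_reload() is step 3.  The
 * function name example_lookup() is made up, and the sketch is simplified:
 * it ignores XArray retry entries and compound pages.
 *
 *        static struct page *example_lookup(struct address_space *mapping,
 *                                           pgoff_t offset)
 *        {
 *                XA_STATE(xas, &mapping->i_pages, offset);
 *                struct page *page;
 *
 *                rcu_read_lock();
 *        repeat:
 *                xas_reset(&xas);
 *                page = xas_load(&xas);
 *                if (!page || xa_is_value(page))
 *                        goto out;
 *                if (!page_cache_get_speculative(page))
 *                        goto repeat;
 *                if (unlikely(page != xas_reload(&xas))) {
 *                        put_page(page);
 *                        goto repeat;
 *                }
 *        out:
 *                rcu_read_unlock();
 *                return page;
 *        }
 */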

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}
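
/*
 * Example (editor's illustration): typical find_get_page() usage.  The
 * reference must be dropped with put_page() once the caller is done with
 * the page; example_peek_uptodate() is a made-up name.
 *
 *        static bool example_peek_uptodate(struct address_space *mapping,
 *                                          pgoff_t index)
 *        {
 *                struct page *page = find_get_page(mapping, index);
 *                bool uptodate = false;
 *
 *                if (page) {
 *                        uptodate = PageUptodate(page);
 *                        put_page(page);
 *                }
 *                return uptodate;
 *        }
 */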

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
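
/*
 * Example (editor's illustration): after find_lock_page() the caller holds
 * both the page lock and a reference, and must release both.  Callers often
 * re-check page->mapping under the lock, since the page could have been
 * truncated before the lock was acquired.
 *
 *        page = find_lock_page(mapping, index);
 *        if (page) {
 *                if (page->mapping == mapping) {
 *                        ... operate on the locked page ...
 *                }
 *                unlock_page(page);
 *                put_page(page);
 *        }
 */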

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}
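
/*
 * Example (editor's sketch): find_or_create_page() in a simple read path.
 * A freshly created page comes back locked but not up to date, so the
 * caller must fill it; example_read_block() is a hypothetical helper that
 * does the actual I/O.
 *
 *        page = find_or_create_page(mapping, index, GFP_KERNEL);
 *        if (!page)
 *                return -ENOMEM;
 *        if (!PageUptodate(page)) {
 *                example_read_block(page);
 *                SetPageUptodate(page);
 *        }
 *        unlock_page(page);
 *        ... use the data, then put_page(page) ...
 */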

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}
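
/*
 * Example (editor's illustration): a speculative producer that can simply
 * skip a page when grab_cache_page_nowait() fails, because the data can be
 * regenerated later.  example_generate() is a hypothetical helper that
 * fills the page.
 *
 *        page = grab_cache_page_nowait(mapping, index);
 *        if (page) {
 *                example_generate(page);
 *                SetPageUptodate(page);
 *                unlock_page(page);
 *                put_page(page);
 *        }
 */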

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        xa_mark_t tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         *  We don't initialize ->index for tail pages: calculate based on
         *  head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}
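
/*
 * Example (editor's note): for a compound page whose head has index 512 in
 * its mapping, the tail page at head + 3 yields page_to_index() == 515,
 * i.e. head->index plus the tail's offset within the compound page.
 */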

/*
 * Get the page's offset into its mapping, in units of PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
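
/*
 * Example (editor's note): for a VMA with vm_pgoff == 16, an address that is
 * 3 pages past vma->vm_start maps to linear_page_index() == 19: the page
 * offset within the VMA (3) plus the file offset of the VMA's start (16).
 */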

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
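
/*
 * Example (editor's sketch): how a fault handler typically uses
 * lock_page_or_retry(), loosely modeled on callers such as do_swap_page().
 * A zero return means the page was not locked and, depending on
 * FAULT_FLAG_RETRY_NOWAIT, mmap_sem may already have been dropped, so the
 * caller must return VM_FAULT_RETRY rather than touch the page.
 *
 *        if (!lock_page_or_retry(page, vma->vm_mm, flags)) {
 *                put_page(page);
 *                return VM_FAULT_RETRY;
 *        }
 *        ... the page is now locked ...
 *        unlock_page(page);
 */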

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
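
/*
 * Example (editor's illustration): waiting for another task to finish with a
 * page, e.g. after losing a trylock race.  The reference taken by the lookup
 * is what keeps the page from being freed while we sleep.
 *
 *        page = find_get_page(mapping, index);
 *        if (page) {
 *                wait_on_page_locked(page);
 *                ... re-validate the page, then put_page(page) ...
 *        }
 */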

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
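
/*
 * Example (editor's sketch): the usual buffered-write pattern, loosely
 * modeled on generic_perform_write(): fault the user buffer in before
 * mapping the page, then copy with a no-fault primitive (kmap_atomic()
 * disables page faults) and retry on a short copy.  buf, bytes, offset,
 * kaddr and page are assumed to be set up by the caller; error handling
 * and iov_iter details are omitted.
 *
 *        if (unlikely(fault_in_pages_readable(buf, bytes)))
 *                return -EFAULT;
 *
 *        kaddr = kmap_atomic(page);
 *        left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *        kunmap_atomic(kaddr);
 *        if (left)
 *                ... fault in again and retry the copy ...
 */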

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
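
/*
 * Example (editor's sketch): adding a newly allocated page to the cache and
 * reading it, roughly what do_read_cache_page() does internally.  Simplified:
 * there is no wait for the read to complete and no error unwinding.
 * add_to_page_cache_lru() returns the page locked; ->readpage() is expected
 * to unlock it when the read finishes, and "file" may be NULL for many
 * filesystems.
 *
 *        page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *        if (!page)
 *                return ERR_PTR(-ENOMEM);
 *        err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *        if (err) {
 *                put_page(page);
 *                return ERR_PTR(err);
 *        }
 *        err = mapping->a_ops->readpage(file, page);
 *        ...
 */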

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}
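
/*
 * Example (editor's note): dir_pages() rounds i_size up to whole pages, so
 * with 4KiB pages an i_size of 5000 bytes gives 2 directory pages, and an
 * i_size of exactly 8192 bytes also gives 2.
 */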

#endif /* _LINUX_PAGEMAP_H */