linux/include/linux/pagemap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

static inline bool mapping_empty(struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_THP_SUPPORT = 6,     /* THPs supported */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
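
/*
 * Example (illustrative, not part of this header): a filesystem's write
 * completion handler would typically record a failure like this, so that
 * a later fsync(2) on any open file sees the error.  The my_fs_* name is
 * hypothetical; bio_first_page_all() and blk_status_to_errno() are the
 * usual block-layer helpers.
 *
 *      static void my_fs_end_write_bio(struct bio *bio)
 *      {
 *              struct page *page = bio_first_page_all(bio);
 *
 *              if (bio->bi_status)
 *                      mapping_set_error(page->mapping,
 *                                      blk_status_to_errno(bio->bi_status));
 *              end_page_writeback(page);
 *              bio_put(bio);
 *      }
 */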

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
        return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
        return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
        return __page_cache_add_speculative(page, count);
}
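
/*
 * Sketch of the lookup-side pattern described above (steps 1-3); this is
 * roughly what find_get_page() does internally.  The helper name is
 * hypothetical and value-entry handling is elided.
 *
 *      static struct page *my_lockless_lookup(struct address_space *mapping,
 *                                             pgoff_t index)
 *      {
 *              struct page *page;
 *
 *              rcu_read_lock();
 *      repeat:
 *              page = xa_load(&mapping->i_pages, index);       // 1. find
 *              if (page) {
 *                      if (!page_cache_get_speculative(page))  // 2. ref
 *                              goto repeat;
 *                      // 3. recheck: the page may have been freed and
 *                      // reused while we were taking the reference
 *                      if (page != xa_load(&mapping->i_pages, index)) {
 *                              put_page(page);
 *                              goto repeat;
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return page;
 *      }
 */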

/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
        get_page(page);
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
        void *data = (void *)page_private(page);

        if (!PagePrivate(page))
                return NULL;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);

        return data;
}
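
/*
 * Example (hypothetical): a filesystem that keeps per-page bookkeeping can
 * pair these helpers in its I/O setup and release paths.  struct
 * my_fs_pstate and the call sites are illustrative only.
 *
 *      struct my_fs_pstate *pstate = kzalloc(sizeof(*pstate), GFP_NOFS);
 *
 *      if (pstate)
 *              attach_page_private(page, pstate);
 *      ...
 *      // later, e.g. in ->releasepage:
 *      kfree(detach_page_private(page));       // NULL-safe
 */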

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020
#define FGP_FOR_MMAP            0x00000040
#define FGP_HEAD                0x00000080
#define FGP_ENTRY               0x00000100

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}
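
/*
 * Typical usage (sketch): the reference obtained here must be dropped
 * with put_page() once the caller is done with the page.
 *
 *      struct page *page = find_get_page(mapping, index);
 *
 *      if (page) {
 *              // ... use the page ...
 *              put_page(page);
 *      }
 */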

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (PageHuge(head))
                return head->index == index;
        return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        struct page *head;

        if (likely(!PageTransTail(page)))
                return page->index;

        head = compound_head(page);
        /*
         *  We don't initialize ->index for tail pages: calculate based on
         *  head page
         */
        return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHuge(page)))
                return hugetlb_basepage_index(page);
        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
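
/*
 * Example: for a fault at @address within a file-backed VMA, the index of
 * the backing pagecache page is (sketch):
 *
 *      pgoff_t pgoff = linear_page_index(vma, address);
 *      struct page *page = find_get_page(vma->vm_file->f_mapping, pgoff);
 *
 * E.g. with vm_pgoff == 16, an address two pages past vm_start yields
 * pgoff == 18.
 */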

struct wait_page_key {
        struct page *page;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct page *page;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                  struct wait_page_key *key)
{
        if (wait_page->page != key->page)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
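
/*
 * Sketch of a typical locking sequence: after sleeping in
 * lock_page_killable() the page may have been truncated, so the mapping
 * must be rechecked before the page is used.
 *
 *      struct page *page = find_get_page(mapping, index);
 *      int err;
 *
 *      if (page) {
 *              err = lock_page_killable(page);
 *              if (err) {
 *                      put_page(page);
 *                      return err;     // -EINTR: fatal signal
 *              }
 *              if (page->mapping != mapping) { // truncated under us
 *                      unlock_page(page);
 *                      put_page(page);
 *                      goto retry;
 *              }
 *              // page is locked, still attached, and pinned
 *      }
 */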

/*
 * lock_page_async - Lock the page, unless this would block. If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
                                  struct wait_page_queue *wait)
{
        if (!trylock_page(page))
                return __lock_page_async(page, wait);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/**
 * set_page_private_2 - Set PG_private_2 on a page and take a ref
 * @page: The page.
 *
 * Set the PG_private_2 flag on a page and take the reference needed for the VM
 * to handle its lifetime correctly.  This sets the flag and takes the
 * reference unconditionally, so care must be taken not to set the flag again
 * if it's already set.
 */
static inline void set_page_private_2(struct page *page)
{
        page = compound_head(page);
        get_page(page);
        SetPagePrivate2(page);
}

void end_page_private_2(struct page *page);
void wait_on_page_private_2(struct page *page);
int wait_on_page_private_2_killable(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
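
/*
 * Example of the pattern these helpers exist for (a sketch of
 * generic_perform_write()-style code, not the exact mm/filemap.c logic):
 * fault the source buffer in *before* taking a page lock, because copying
 * from userspace while holding a page lock could fault on that very page
 * and deadlock.
 *
 *      if (unlikely(fault_in_pages_readable(buf, bytes))) {
 *              status = -EFAULT;
 *              break;
 *      }
 *      // ... ->write_begin(), copy from buf, ->write_end() ...
 */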

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
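
/*
 * Example: the usual allocate-and-insert sequence (roughly what
 * pagecache_get_page() does for FGP_CREAT; sketch only):
 *
 *      struct page *page = __page_cache_alloc(gfp_mask);
 *      int err;
 *
 *      if (!page)
 *              return -ENOMEM;
 *      err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
 *      if (err) {
 *              put_page(page);
 *              return err;     // -EEXIST if another task won the race
 *      }
 *      // page is now locked and in the cache; fill it, then unlock_page()
 */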

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *        May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)                             \
        struct readahead_control ractl = {                              \
                .file = f,                                              \
                .mapping = m,                                           \
                .ra = r,                                                \
                ._index = i,                                            \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct page *,
                unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_sync_ra(&ractl, req_count);
}
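
/*
 * Example (sketch of the pattern used by the generic read path): on a
 * cache miss, kick off synchronous readahead and then retry the lookup.
 *
 *      page = find_get_page(mapping, index);
 *      if (!page) {
 *              page_cache_sync_readahead(mapping, &file->f_ra, file,
 *                                        index, last_index - index);
 *              page = find_get_page(mapping, index);
 *      }
 */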

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct page *page, pgoff_t index, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_async_ra(&ractl, page, req_count);
}

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;

        if (!rac->_nr_pages) {
                rac->_batch_count = 0;
                return NULL;
        }

        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        rac->_batch_count = thp_nr_pages(page);

        return page;
}
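
/*
 * Example: a minimal ->readahead implementation (sketch;
 * my_fs_read_page() is a hypothetical helper that submits the read and
 * unlocks the page when the I/O completes):
 *
 *      static void my_fs_readahead(struct readahead_control *rac)
 *      {
 *              struct page *page;
 *
 *              while ((page = readahead_page(rac))) {
 *                      my_fs_read_page(rac->mapping->host, page);
 *                      put_page(page);
 *              }
 *      }
 */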

static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, page))
                        continue;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);

                /*
                 * The page cache isn't using multi-index entries yet,
                 * so the xas cursor needs to be manually moved to the
                 * next index.  This can be removed once the page cache
                 * is converted.
                 */
                if (PageHead(page))
                        xas_set(&xas, rac->_index + rac->_batch_count);

                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                                \
        __readahead_batch(rac, array, ARRAY_SIZE(array))
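
/*
 * Batched variant of the loop above, with an on-stack array (sketch;
 * my_fs_read_page() as before, and each page must still be unlocked and
 * released once its I/O is done):
 *
 *      struct page *pages[16];
 *      unsigned int i, nr;
 *
 *      while ((nr = readahead_page_batch(rac, pages))) {
 *              for (i = 0; i < nr; i++) {
 *                      my_fs_read_page(rac->mapping->host, pages[i]);
 *                      put_page(pages[i]);
 *              }
 *      }
 */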

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
        return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
        return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}
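
/*
 * Example: a ->page_mkwrite handler using the check above (sketch; names
 * other than page_mkwrite_check_truncate() are illustrative):
 *
 *      static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
 *      {
 *              struct page *page = vmf->page;
 *              struct inode *inode = file_inode(vmf->vma->vm_file);
 *              int ret;
 *
 *              lock_page(page);
 *              ret = page_mkwrite_check_truncate(page, inode);
 *              if (ret < 0) {
 *                      unlock_page(page);
 *                      return VM_FAULT_NOPAGE;
 *              }
 *              // ret bytes of this page lie inside EOF; dirty it, etc.
 *              set_page_dirty(page);
 *              return VM_FAULT_LOCKED;
 *      }
 */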

/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
        return thp_size(page) >> inode->i_blkbits;
}
#endif /* _LINUX_PAGEMAP_H */