linux/include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
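
/*
 * Illustrative sketch (not part of this header): an asynchronous writeback
 * completion path would typically record a failure like this, so that a
 * later fsync()/msync() can report it.  "my_end_writeback" and its arguments
 * are hypothetical, not a real kernel interface.
 *
 *	static void my_end_writeback(struct page *page, int error)
 *	{
 *		if (error)
 *			mapping_set_error(page->mapping, error);  // remember AS_EIO/AS_ENOSPC
 *		end_page_writeback(page);
 *	}
 */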

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
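
/*
 * Illustrative sketch (not part of this header): a filesystem that must not
 * recurse into itself under memory pressure can restrict its mapping's
 * allocation mask at inode-setup time; later pagecache allocations then
 * honour it via mapping_gfp_constraint().  "my_fill_inode" is hypothetical.
 *
 *	static void my_fill_inode(struct inode *inode)
 *	{
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);  // before activation
 *	}
 *
 *	// ... elsewhere, when allocating a cache page for that mapping:
 *	page = __page_cache_alloc(mapping_gfp_constraint(mapping, GFP_KERNEL));
 */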

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
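
/*
 * Illustrative sketch (not part of this header): the lookup-side protocol
 * described above, written out as a loop.  This is a minimal sketch of what
 * the real find_get_page()/pagecache_get_page() code in mm/filemap.c does;
 * the production code uses radix_tree_lookup_slot()/radix_tree_deref_slot()
 * and also handles exceptional (shadow) entries, which are glossed over here.
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);		// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))			// step 2
 *			goto repeat;
 *		// step 3: has the page been removed/replaced meanwhile?
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */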

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
                                  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}
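
/*
 * Illustrative sketch (not part of this header): the usual pattern for a
 * caller that needs a locked, up-to-date cache page at a given index.
 * "my_fill_page" is a hypothetical helper that reads data into the page.
 *
 *	struct page *page;
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	if (!PageUptodate(page)) {
 *		my_fill_page(page);		// hypothetical read-in step
 *		SetPageUptodate(page);
 *	}
 *	// ... use the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */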

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        pgoff_t pgoff;

        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         *  We don't initialize ->index for tail pages: calculate based on
         *  head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
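
/*
 * Worked example (illustrative, not from the kernel sources): for a VMA with
 * vm_start = 0x7f0000000000 and vm_pgoff = 16, a fault at user address
 * 0x7f0000003000 maps to pagecache index
 *
 *	((0x7f0000003000 - 0x7f0000000000) >> PAGE_SHIFT) + 16 = 3 + 16 = 19
 *
 * assuming 4KiB pages (PAGE_SHIFT == 12).
 */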

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
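
/*
 * Illustrative sketch (not part of this header): a caller that can be
 * interrupted by a fatal signal checks the return value and bails out
 * instead of sleeping indefinitely on the page lock.
 *
 *	int err;
 *
 *	err = lock_page_killable(page);
 *	if (err) {
 *		put_page(page);
 *		return err;		// -EINTR: task is being killed
 *	}
 *	// ... page is locked here ...
 *	unlock_page(page);
 */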

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
                                             int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault one or two userspace pages into pagetables.
 * Return -EINVAL if more than two pages would be needed.
 * Return non-zero on a fault.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int span, ret;

        if (unlikely(size == 0))
                return 0;

        span = offset_in_page(uaddr) + size;
        if (span > 2 * PAGE_SIZE)
                return -EINVAL;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0 && span > PAGE_SIZE)
                ret = __put_user(0, uaddr + size - 1);
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
}

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
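
/*
 * Illustrative sketch (not part of this header): buffered-write paths in the
 * style of generic_perform_write() prefault the user buffer before taking
 * the page lock, so that no page fault can happen while copying with page
 * faults disabled (which could otherwise deadlock on the very page being
 * written).  This is a simplified sketch of that pattern, not a copy of
 * mm/filemap.c; highmem handling and ->write_begin/->write_end are omitted,
 * and "buf", "bytes", "index", "offset" and "flags" are assumed context.
 *
 *	if (unlikely(fault_in_multipages_readable(buf, bytes)))
 *		return -EFAULT;
 *
 *	page = grab_cache_page_write_begin(mapping, index, flags);
 *	if (!page)
 *		return -ENOMEM;
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(page_address(page) + offset, buf, bytes);
 *	pagefault_enable();
 *	// if "left" is non-zero, the caller retries after prefaulting again
 */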

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
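
/*
 * Illustrative sketch (not part of this header): inserting a freshly
 * allocated page at a given index.  Because the new page is not yet visible
 * to anyone else, the non-atomic __SetPageLocked() inside add_to_page_cache()
 * is safe.
 *
 *	page = __page_cache_alloc(mapping_gfp_constraint(mapping, GFP_KERNEL));
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 *	// page is now in the cache, locked, and holds a cache reference
 */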

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */