linux/include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
        AS_BALLOON_MAP  = __GFP_BITS_SHIFT + 4, /* balloon page special map */
        AS_EXITING      = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
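
/*
 * Illustrative sketch (not part of this header): how the error bits set
 * above are typically consumed.  A writeback completion path calls
 * mapping_set_error(), and a later fsync-style caller harvests the
 * result, clearing each bit as it is reported; -EIO takes precedence if
 * both are pending.  This mirrors what filemap_check_errors() does in
 * mm/filemap.c; the function name below is hypothetical.
 */
static inline int pagemap_example_check_errors(struct address_space *mapping)
{
        int ret = 0;

        /* test_bit() first keeps the common error-free path cheap */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}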

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
        set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
        clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
        return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
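
/*
 * Illustrative sketch: a filesystem typically restricts the allocation
 * mask once, while initialising a new inode, so that later page cache
 * allocations for that mapping cannot recurse back into the filesystem.
 * The helper name is hypothetical; real filesystems simply call
 * mapping_set_gfp_mask() directly from their inode setup code.
 */
static inline void pagemap_example_restrict_mapping(struct address_space *mapping)
{
        /* Clear __GFP_FS so pagecache allocations never re-enter the fs */
        mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}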

/*
 * The page cache can be managed in chunks larger than
 * one page, because larger chunks allow for more efficient
 * throughput (they can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);

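/*
 * Illustrative sketch: how file positions map onto page cache units.
 * Read/write paths in mm/filemap.c split a byte position into the index
 * of the backing pagecache page and the offset within that page in this
 * way; the helper and its parameter names are hypothetical.
 */
static inline void pagemap_example_split_pos(loff_t pos, pgoff_t *index,
                                             unsigned long *offset)
{
        *index  = pos >> PAGE_CACHE_SHIFT;              /* which page */
        *offset = pos & (PAGE_CACHE_SIZE - 1);          /* where inside it */
}
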
/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return a higher
 * value than expected, and put_page must be able to do the right thing when
 * the page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove side, which cares about the stability of _count (e.g. reclaim),
 * has the following pattern (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
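
/*
 * Illustrative sketch of the lookup-side pattern described above (a
 * simplified cousin of find_get_page() in mm/filemap.c).  Step numbers
 * refer to the comment; handling of exceptional radix-tree entries and
 * slot retries is omitted, and the function name is hypothetical.
 */
static inline struct page *pagemap_example_find_get(struct address_space *mapping,
                                                    pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);  /* step 1 */
        if (page) {
                if (!page_cache_get_speculative(page))          /* step 2 */
                        goto repeat;
                /*
                 * Step 3: the page may have been freed and reused for
                 * something else between steps 1 and 2; if so, drop the
                 * speculative reference and start again.
                 */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       offset))) {
                        page_cache_release(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}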

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x) |
                                  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
                                 gfp_t gfp_mask);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns the locked page at the given index in the given cache,
 * creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index);
extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
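
/*
 * Illustrative sketch: typical use of read_mapping_page().  On success
 * the page is up to date and the caller holds a reference that must be
 * dropped with page_cache_release(); on failure an ERR_PTR() is
 * returned.  The helper name is hypothetical.
 */
static inline int pagemap_example_read_byte(struct address_space *mapping,
                                            loff_t pos, char *out)
{
        struct page *page;
        char *kaddr;

        page = read_mapping_page(mapping, pos >> PAGE_CACHE_SHIFT, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);

        kaddr = kmap_atomic(page);
        *out = kaddr[pos & (PAGE_CACHE_SIZE - 1)];
        kunmap_atomic(kaddr);

        page_cache_release(page);
        return 0;
}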

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
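
/*
 * Worked example for linear_page_index() (illustrative): with 4K pages,
 * a VMA with vm_start = 0x10000 and vm_pgoff = 4, the address 0x13000
 * lies three pages into the VMA, so its page cache index is 3 + 4 = 7.
 * The final shift is a no-op while PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */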

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
        __set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
        __clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}
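
/*
 * Illustrative sketch: the common lock-and-revalidate idiom built on
 * lock_page().  Holding the page lock stabilises page->mapping, so a
 * caller that looked the page up without the lock can detect a racing
 * truncation afterwards.  The function name is hypothetical.
 */
static inline int pagemap_example_lock_revalidate(struct page *page,
                                                  struct address_space *mapping)
{
        lock_page(page);
        if (page->mapping != mapping) {
                /* Truncated (or migrated) while we slept; caller should retry */
                unlock_page(page);
                return 0;
        }
        return 1;       /* still attached; caller must unlock_page() later */
}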

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (PageLocked(page))
                return wait_on_page_bit_killable(page, PG_locked);
        return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
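
/*
 * Illustrative sketch: before tearing down, truncating or migrating a
 * page, callers generally take the page lock to stop new I/O and then
 * drain any writeback that is already in flight.  The function name is
 * hypothetical.
 */
static inline void pagemap_example_quiesce_page(struct page *page)
{
        lock_page(page);                /* block new writers and I/O starts */
        wait_on_page_writeback(page);   /* wait out writeback already running */
        /* ... the page contents are now stable; do the real work here ... */
        unlock_page(page);
}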

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
}
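
/*
 * Illustrative sketch of how the prefault helpers above are used by
 * write paths such as generic_perform_write() in mm/filemap.c: fault the
 * user buffer in first, then copy into a pagecache page with a
 * non-faulting primitive (kmap_atomic() disables pagefaults here).  A
 * short copy would normally be retried by the caller; the function name
 * is hypothetical and 'bytes' is assumed to fit within one page.
 */
static inline size_t pagemap_example_copy_from_user(struct page *page,
                                                    const char __user *buf,
                                                    size_t bytes)
{
        char *kaddr;
        size_t left;

        if (fault_in_pages_readable(buf, bytes))
                return 0;                       /* user buffer not accessible */

        kaddr = kmap_atomic(page);
        left = __copy_from_user_inatomic(kaddr, buf, bytes);
        kunmap_atomic(kaddr);

        return bytes - left;                    /* bytes actually copied */
}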

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
        int ret = 0;
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return ret;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        while (uaddr <= end) {
                ret = __put_user(0, uaddr);
                if (ret != 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                ret = __put_user(0, end);

        return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
{
        volatile char c;
        int ret = 0;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return ret;

        while (uaddr <= end) {
                ret = __get_user(c, uaddr);
                if (ret != 0)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                ret = __get_user(c, end);
                (void)c;
        }

        return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __set_page_locked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __clear_page_locked(page);
        return error;
}

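/*
 * Illustrative sketch: the usual way a read or readahead path populates
 * the page cache using the helpers above, as done in mm/filemap.c and
 * mm/readahead.c.  On success the new page is returned locked and not
 * yet up to date; the caller reads it in, marks it up to date and
 * unlocks it.  The function name is hypothetical.
 */
static inline struct page *pagemap_example_add_new_page(struct address_space *mapping,
                                                        pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);
        int error;

        if (!page)
                return NULL;

        error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
        if (error) {
                /* Most likely someone else added this index first */
                page_cache_release(page);
                return NULL;
        }
        return page;
}
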
#endif /* _LINUX_PAGEMAP_H */