linux/include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
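
/*
 * Illustrative sketch (not part of this header): an asynchronous write-back
 * completion handler typically records a failure against the mapping so it
 * can be reported later (e.g. at fsync() time).  "err" and "page" here are
 * hypothetical names for this example:
 *
 *	if (err) {
 *		SetPageError(page);
 *		mapping_set_error(page->mapping, err);
 *	}
 *	end_page_writeback(page);
 */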

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}
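
/*
 * Illustrative sketch (not part of this header): a caller that would like a
 * GFP_KERNEL allocation but must respect any restriction the filesystem has
 * placed on the mapping (e.g. a mapping limited to GFP_NOFS) can write:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *
 *	page = __page_cache_alloc(gfp);
 */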

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
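
/*
 * Illustrative sketch (not part of this header) of the lookup-side pattern
 * described above, loosely modelled on the lockless find_get_page():
 *
 *	rcu_read_lock();
 *repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// 1. find page
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// 2. try to pin
 *			goto repeat;
 *		// 3. recheck: the page may have been freed and reused
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *						       offset))) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */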

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
                                  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}
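
/*
 * Illustrative sketch (not part of this header): a typical read-side lookup.
 * On success the caller owns one reference and must drop it with put_page():
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */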

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
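
/*
 * Illustrative sketch (not part of this header): a caller that needs the page
 * stable against truncation locks it, then releases both the lock and the
 * reference when done:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		... page is pinned and locked; truncation is excluded ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */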

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}
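
/*
 * Illustrative sketch (not part of this header): creating (or finding) a page
 * in a metadata mapping.  The page comes back locked with a reference held:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... initialise or read the page ...
 *	unlock_page(page);
 *	put_page(page);
 */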

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
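
/*
 * Illustrative sketch (not part of this header): read_mapping_page() returns
 * either an uptodate page with a reference held or an ERR_PTR() on failure:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	put_page(page);
 */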

/*
 * Get the page's index in the radix tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}
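
/*
 * Worked example (illustrative): for a transparent huge page whose head has
 * ->index == 512, the third tail page (compound_head(page) + 3) yields
 * 512 + 3 = 515, i.e. the head's index plus the tail's offset within the
 * compound page.
 */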

/*
 * Get the page's offset within its mapping, in units of PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
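
/*
 * Worked arithmetic example (illustrative, assuming PAGE_SHIFT == 12): for a
 * VMA with vm_start == 0x400000 and vm_pgoff == 16, a fault at address
 * 0x403000 maps to pgoff = ((0x403000 - 0x400000) >> 12) + 16 = 3 + 16 = 19,
 * i.e. page index 19 within the backing object.
 */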

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
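
/*
 * Illustrative sketch (not part of this header): callers that may block on a
 * page lock in a user-interruptible context typically bail out cleanly when
 * a fatal signal arrives:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		put_page(page);
 *		return error;		// -EINTR
 *	}
 *	... page is locked ...
 *	unlock_page(page);
 */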

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated reference count so that the page
 * won't go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
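
/*
 * Illustrative sketch (not part of this header): the classic buffered-write
 * pattern.  User pages are faulted in up front, then copied with page faults
 * disabled so the copy cannot deadlock on a page the writer itself holds
 * locked.  "buf", "bytes" and "kaddr" are hypothetical names:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr, buf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		... shorten the copy or retry ...
 */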

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
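
/*
 * Illustrative sketch (not part of this header): the usual way a new page
 * enters the page cache.  On failure the caller still owns the only
 * reference and must drop it:
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (error) {
 *		put_page(page);
 *		return error;
 *	}
 *	... page is locked and in the cache ...
 */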

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}
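
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a directory
 * inode with i_size == 10000 bytes spans (10000 + 4095) >> 12 = 3 pages;
 * an i_size of exactly 8192 spans 2.
 */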

#endif /* _LINUX_PAGEMAP_H */