linux/include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
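
/*
 * Example (illustrative sketch, not part of this header's API): a
 * hypothetical filesystem's writeback completion path records a failure with
 * mapping_set_error(), and a legacy fsync path later collects it by testing
 * and clearing the AS_* bits (the errseq_t based wb_err channel is the
 * preferred mechanism for new code).
 */
static inline void example_note_write_error(struct address_space *mapping,
                                            int err)
{
        /* err is expected to be 0 or a negative errno such as -EIO */
        mapping_set_error(mapping, err);
}

static inline int example_collect_write_error(struct address_space *mapping)
{
        int err = 0;

        /* Testing and clearing means each error is reported only once. */
        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                err = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping->flags))
                err = -EIO;
        return err;
}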

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}
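
/*
 * Example (illustrative, hypothetical helper): a filesystem that must not
 * recurse into itself while holding locks can clamp the allocation mask with
 * mapping_gfp_constraint() before allocating a page cache page.  A real
 * caller would normally pass the resulting mask to __page_cache_alloc() or
 * pagecache_get_page() (both declared further down in this header); plain
 * alloc_pages() is used here only to keep the sketch self-contained.
 */
static inline struct page *example_alloc_nofs_page(struct address_space *mapping)
{
        /* Intersect the caller's GFP_NOFS constraint with the mapping's mask */
        return alloc_pages(mapping_gfp_constraint(mapping, GFP_NOFS), 0);
}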

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
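
/*
 * Example (illustrative sketch of the lookup-side protocol described above;
 * it is NOT the real find_get_page() implementation, which also has to cope
 * with exceptional/shadow entries).  Assumes the radix-tree based page cache
 * of this kernel generation, i.e. mapping->page_tree.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
                                                   pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        /* 1. find page in radix tree */
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page) {
                /* 2. conditionally increment refcount */
                if (!page_cache_get_speculative(page))
                        goto repeat;
                /* 3. check the page is still in pagecache (if no, goto 1) */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       offset))) {
                        put_page(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}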

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
                                  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);
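
/*
 * Informal summary of the FGP_* flags above (see pagecache_get_page() in
 * mm/filemap.c for the authoritative behaviour):
 *
 *   FGP_ACCESSED - mark the page accessed so the LRU keeps it around longer
 *   FGP_LOCK     - return the page locked (sleeping on the lock unless
 *                  FGP_NOWAIT is also given)
 *   FGP_CREAT    - allocate a page and add it to the cache if none is found
 *   FGP_WRITE    - the caller intends to write to the page
 *   FGP_NOFS     - do not recurse into the filesystem during allocation
 *   FGP_NOWAIT   - do not sleep for locks or reclaim; fail instead
 *
 * The helpers below are thin wrappers around pagecache_get_page() with
 * common flag combinations.
 */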

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
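
/*
 * Example (illustrative sketch, hypothetical helper): read one page of a
 * mapping's data and copy a few bytes out of it.  read_mapping_page() returns
 * an up-to-date, unlocked page with a reference held, or an ERR_PTR() on I/O
 * failure.  Assumes <linux/err.h> (IS_ERR/PTR_ERR) and <linux/string.h>
 * (memcpy) are available, as they are for practically every user of this
 * header.
 */
static inline int example_read_bytes(struct address_space *mapping,
                                     pgoff_t index, size_t offset,
                                     void *dst, size_t len)
{
        struct page *page;
        char *kaddr;

        if (offset + len > PAGE_SIZE)
                return -EINVAL;

        /* NULL is passed through to ->readpage() as the struct file pointer */
        page = read_mapping_page(mapping, index, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);

        kaddr = kmap_atomic(page);
        memcpy(dst, kaddr + offset, len);
        kunmap_atomic(kaddr);

        put_page(page);
        return 0;
}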

/*
 * Get the index of the page within the radix-tree
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         *  We don't initialize ->index for tail pages: calculate based on
         *  head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}
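
/*
 * Worked example (illustrative): for a 2MB transparent huge page whose head
 * has ->index == 512, the third tail page gives page_to_index() ==
 * 512 + 3 = 515.  For a 2MB hugetlbfs page, ->index is kept in huge-page
 * units, so page_to_pgoff() shifts it left by compound_order(page) (9 with
 * 4KB base pages) to convert it into PAGE_SIZE units.
 */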

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
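
/*
 * Worked example (illustrative): for a mapping created with
 * mmap(..., offset = 16 * PAGE_SIZE) so that vma->vm_pgoff == 16, a fault at
 * vma->vm_start + 5 * PAGE_SIZE resolves to file page index 5 + 16 = 21,
 * i.e. byte offset 21 << PAGE_SHIFT within the file.
 */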

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
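
/*
 * Example (illustrative sketch, hypothetical helper): the common
 * lock-and-revalidate pattern.  A page found without the page lock may be
 * truncated while we sleep in lock_page(), so ->mapping has to be re-checked
 * once the lock is held (this is essentially what find_lock_page() does).
 */
static inline struct page *example_find_lock_valid(struct address_space *mapping,
                                                   pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return NULL;
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                /* Truncated (or migrated) while we slept: give up. */
                unlock_page(page);
                put_page(page);
                return NULL;
        }
        return page;    /* locked, with the reference from find_get_page() */
}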

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
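
/*
 * Example (illustrative sketch, hypothetical helper): how the fault-in
 * helpers above are typically used in a write path.  The copy into the page
 * is done with page faults disabled (so it cannot sleep); if nothing could
 * be copied, the source range is faulted in -- with no page lock held, to
 * avoid deadlocking against ourselves -- and the copy is retried.
 */
static inline long example_copy_from_user_to_page(struct page *page,
                                                  size_t offset,
                                                  const char __user *buf,
                                                  size_t bytes)
{
        long copied;

        do {
                char *kaddr = kmap_atomic(page);

                pagefault_disable();
                copied = bytes - __copy_from_user_inatomic(kaddr + offset,
                                                           buf, bytes);
                pagefault_enable();
                kunmap_atomic(kaddr);

                if (likely(copied))
                        break;
                /*
                 * Nothing was copied: the user buffer is not resident.
                 * Fault it in (which may sleep) and try the copy again.
                 */
        } while (fault_in_pages_readable(buf, bytes) == 0);

        return copied;  /* number of bytes copied, possibly 0 */
}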

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
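
/*
 * Example (illustrative sketch, hypothetical helper): allocate a fresh page
 * and insert it into the page cache at @index.  On success the page is
 * returned locked, on the LRU, and with the reference obtained from the
 * allocation; the caller is expected to fill it and unlock_page() it.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
                                                pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index, gfp)) {
                /* Someone else added a page here first, or we hit -ENOMEM */
                put_page(page);
                return NULL;
        }
        return page;
}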

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */