linux/include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
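
/*
 * Example (illustrative sketch, not part of this header): a filesystem's
 * async writeback completion path might record an I/O error here so that
 * a later fsync()/filemap_fdatawait() can report it.  my_end_io_write()
 * is hypothetical and assumes a single-page bio; only mapping_set_error()
 * and end_page_writeback() come from this header's API:
 *
 *	static void my_end_io_write(struct bio *bio, int err)
 *	{
 *		struct page *page = bio->bi_io_vec[0].bv_page;
 *
 *		if (err) {
 *			SetPageError(page);
 *			mapping_set_error(page->mapping, err);
 *		}
 *		end_page_writeback(page);
 *		bio_put(bio);
 *	}
 */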

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (likely(mapping))
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
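
/*
 * Example (illustrative sketch, not part of this header): a filesystem
 * that must not recurse into itself during page-cache allocation can
 * restrict the mapping's allocation mode while the inode is being set
 * up, before the mapping is used.  my_iget() is hypothetical; similar
 * mapping_set_gfp_mask(..., GFP_NOFS) calls appear in several
 * filesystems for metadata inodes:
 *
 *	static struct inode *my_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode = iget_locked(sb, ino);
 *
 *		if (inode && (inode->i_state & I_NEW)) {
 *			mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *			...
 *			unlock_new_inode(inode);
 *		}
 *		return inode;
 *	}
 */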

/*
 * The page cache can be handled in larger chunks than
 * one page, because that allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
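
/*
 * Example (illustrative): splitting a file position into a page-cache
 * index and an offset within that page, as the generic write path does:
 *
 *	pgoff_t index   = pos >> PAGE_CACHE_SHIFT;
 *	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
 *	size_t bytes    = min_t(size_t, PAGE_CACHE_SIZE - offset, count);
 *
 * With 4K pages, pos = 10000 gives index 2 and offset 1808.
 */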

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON(page_count(page) == 0);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON(PageTail(page));

        return 1;
}
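
/*
 * Example (simplified, illustrative sketch of the lookup-side pattern
 * described above; the real find_get_page() in mm/filemap.c uses
 * radix_tree_lookup_slot() and is more careful):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);	(step 1)
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		(step 2)
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, index)) {
 *			page_cache_release(page);		(step 3)
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */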

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON(page_count(page) == 0);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON(PageCompound(page) && page != compound_head(page));

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON(page_count(page) != 0);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}
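
/*
 * Example (illustrative sketch of the remove-side steps A-C above; the
 * real code is __remove_mapping() in mm/vmscan.c, which also handles
 * dirty pages and swap cache):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	(A) expect one reference for the pagecache plus one for the caller:
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;
 *	(B) remove the frozen page from the radix tree:
 *	__remove_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	(C) free the page; its refcount is already zero, so it must not be
 *	    dropped with page_cache_release().
 *
 * If a concurrent lookup has taken a speculative reference,
 * page_freeze_refs() sees an unexpected count, fails, and the page is
 * kept.
 */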

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page *read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(struct address_space *mapping,
                                                   pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
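
/*
 * Example (illustrative, in the style of directory helpers such as
 * ext2_get_page()): a hypothetical helper that returns page @n of an
 * inode's pagecache, mapped and uptodate:
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return page;			(propagate the ERR_PTR)
 *	kaddr = kmap(page);
 *	... use the page contents ...
 *	kunmap(page);
 *	page_cache_release(page);
 *
 * read_mapping_page() returns either an uptodate page with an elevated
 * reference count or an ERR_PTR() value; the caller owns that reference.
 */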

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
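
/*
 * Worked example (illustrative): for a file mmap()ed at vma->vm_start
 * with a 64K file offset (vma->vm_pgoff == 16 with 4K pages), the address
 * vma->vm_start + 8192 lies two pages into the mapping, so
 * linear_page_index() returns 16 + 2 = 18: the address corresponds to
 * pagecache index 18 of the backing file.  page_offset() of that page is
 * then 18 << PAGE_CACHE_SHIFT == 73728 bytes.
 */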

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
        __set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
        __clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}
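
/*
 * Example (illustrative sketch of the usual lock-and-revalidate pattern;
 * compare the read and fault paths in mm/filemap.c): once a page found by
 * lookup has been locked, it must be re-checked against truncation,
 * because truncate may have detached it from the mapping while we slept:
 *
 * repeat:
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		lock_page(page);
 *		if (unlikely(page->mapping != mapping)) {
 *			unlock_page(page);
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *		... page is locked and still attached to mapping ...
 *	}
 */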

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated reference count (page->_count), so that
 * the page won't go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
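
/*
 * Example (illustrative): code that must see a page in a stable state,
 * such as truncation, typically locks the page and then waits for any
 * writeback in flight to finish before touching it:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... the page is now locked and not under writeback ...
 *	unlock_page(page);
 *
 * The caller must hold a reference on the page (see wait_on_page_locked()
 * above) so that it cannot be freed while we sleep.
 */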

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __get_user(c, end);
        }
        return ret;
}
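
/*
 * Example (illustrative sketch of why these helpers exist; compare
 * generic_perform_write() in mm/filemap.c): the write path must not take
 * a page fault while it holds a locked pagecache page, because the fault
 * could recurse into the same filesystem.  So the user buffer is faulted
 * in first, and the actual copy is done with page faults disabled:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);
 *	...
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 *	...
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *
 * If the atomic copy comes up short, the caller retries after faulting
 * the buffer in again.
 */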

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __set_page_locked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __clear_page_locked(page);
        return error;
}
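
/*
 * Example (illustrative sketch; compare page_cache_read() in mm/filemap.c):
 * populating the cache with a freshly allocated page and handing it to
 * the filesystem's readpage method:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (ret == 0)
 *		ret = mapping->a_ops->readpage(file, page);
 *	else if (ret == -EEXIST)
 *		ret = 0;			(someone else beat us to it)
 *	page_cache_release(page);
 *
 * The page enters the cache locked (via __set_page_locked()) and is
 * unlocked by the readpage completion path.
 */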

#endif /* _LINUX_PAGEMAP_H */