linux/include/linux/rmap.h
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;  /* Root of this anon_vma tree */
        spinlock_t lock;        /* Serialize access to vma list */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)

        /*
         * The external_refcount is taken by either KSM or page migration
         * to take a reference to an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for cleaning up the
         * anon_vma if it is the last user on release.
         */
        atomic_t external_refcount;
#endif
        /*
         * NOTE: the LSB of the head.next is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * head must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB itself
         * is serialized by a system-wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct list_head head;  /* Chain of private "related" vmas */
};
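
/*
 * Illustrative sketch, not part of this header: per the NOTE above,
 * any access to ->head must happen under the root anon_vma's lock,
 * so that the LSB trick played by mm_take_all_locks() on head.next
 * is never observed half-done. Linking a (hypothetical) chain entry
 * avc looks like:
 *
 *      spin_lock(&anon_vma->root->lock);
 *      list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 *      spin_unlock(&anon_vma->root->lock);
 */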

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
        struct list_head same_anon_vma; /* locked by anon_vma->lock */
};
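
/*
 * Sketch of how the two lists are used (callback body elided): an
 * rmap scan resolves a page to its anon_vma and then walks
 * same_anon_vma to visit every VMA that might map it, much as
 * mm/rmap.c does internally.
 *
 *      struct anon_vma_chain *avc;
 *
 *      anon_vma_lock(anon_vma);
 *      list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *              struct vm_area_struct *vma = avc->vma;
 *              ... inspect the pte for the page in this vma ...
 *      }
 *      anon_vma_unlock(anon_vma);
 */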

#ifdef CONFIG_MMU
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
        atomic_set(&anon_vma->external_refcount, 0);
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
        return atomic_read(&anon_vma->external_refcount);
}

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->external_refcount);
}

void drop_anon_vma(struct anon_vma *);
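
/*
 * Usage sketch (migration-style, details elided): the external
 * reference keeps the anon_vma alive across an operation during
 * which the vma itself may disappear.
 *
 *      get_anon_vma(anon_vma);
 *      ... unmap and move the page without holding mmap_sem ...
 *      drop_anon_vma(anon_vma);        frees it if we were the last user
 */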
#else
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
        return 0;
}

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
}

static inline void drop_anon_vma(struct anon_vma *anon_vma)
{
}
#endif /* CONFIG_KSM || CONFIG_MIGRATION */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}
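
/*
 * For an anonymous page, page->mapping stores the anon_vma pointer
 * with PAGE_MAPPING_ANON set in its low bits; page_rmapping() masks
 * those bits off. Comparing against the full PAGE_MAPPING_FLAGS mask
 * also rejects KSM pages, whose mapping carries an extra flag bit.
 * Hypothetical caller:
 *
 *      struct anon_vma *av = page_anon_vma(page);
 *      if (!av)
 *              return;         ... file-backed or KSM page ...
 */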

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_lock(&anon_vma->root->lock);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_unlock(&anon_vma->root->lock);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
        spin_lock(&anon_vma->root->lock);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
        spin_unlock(&anon_vma->root->lock);
}
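
/*
 * All four helpers funnel to the root anon_vma's spinlock, so an
 * entire tree of anon_vmas created by fork is serialized by a single
 * lock. Sketch of the typical pattern (e.g. when growing a vma):
 *
 *      vma_lock_anon_vma(vma);
 *      ... adjust vma->vm_start or vma->vm_end ...
 *      vma_unlock_anon_vma(vma);
 */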

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}
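
/*
 * Accounting sketch (fault-path details elided): a freshly allocated
 * anonymous page is counted with the _new_ variant, a page already
 * known to the rmap (e.g. swap re-use) with page_add_anon_rmap(),
 * fork duplicates an existing mapping with page_dup_rmap(), and
 * tearing down a pte undoes one count:
 *
 *      page_add_new_anon_rmap(page, vma, address);
 *      page_add_anon_rmap(page, vma, address);
 *      page_dup_rmap(page);
 *      page_remove_rmap(page);
 */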

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
        unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

bool is_vma_temporary_stack(struct vm_area_struct *vma);

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                        unsigned long address, enum ttu_flags flags);
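
/*
 * The low byte of ttu_flags selects the mode and the high bits modify
 * it, so callers can combine them; a sketch:
 *
 *      ret = try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK);
 *
 * while a callee recovers the mode alone with TTU_ACTION():
 *
 *      if (TTU_ACTION(flags) == TTU_MUNLOCK)
 *              ... only checking for mlocked vmas ...
 */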

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                                unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}
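
/*
 * Sketch of the check-and-release pattern: on success the pte is
 * mapped and its page table lock is held, so the caller must drop
 * both with pte_unmap_unlock().
 *
 *      spinlock_t *ptl;
 *      pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *      if (pte) {
 *              ... examine or modify *pte ...
 *              pte_unmap_unlock(pte, ptl);
 *      }
 */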

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *__page_lock_anon_vma(struct page *page);

static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;

        __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

        /* (void) is needed to make gcc happy */
        (void) __cond_lock(&anon_vma->root->lock, anon_vma);

        return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma);
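
/*
 * Pairing sketch, memory-failure style: page_lock_anon_vma() returns
 * the anon_vma with its root lock held, or NULL if the page is not
 * anonymous (or the anon_vma is already gone).
 *
 *      struct anon_vma *av = page_lock_anon_vma(page);
 *      if (av) {
 *              ... walk av->head under the lock ...
 *              page_unlock_anon_vma(av);
 *      }
 */
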
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg);
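
/*
 * Callback sketch (the function name is made up for illustration):
 * rmap_walk() invokes rmap_one for every vma that may map the page
 * and stops early unless the callback returns SWAP_AGAIN (defined
 * below).
 *
 *      static int remove_one(struct page *page, struct vm_area_struct *vma,
 *                      unsigned long addr, void *arg)
 *      {
 *              ... replace the migration pte at addr in this vma ...
 *              return SWAP_AGAIN;
 *      }
 *
 *      rmap_walk(page, remove_one, NULL);
 */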

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *cnt,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
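
/*
 * Interpretation sketch in the style of vmscan's shrink_page_list()
 * (labels elided): SWAP_AGAIN means "try again later", SWAP_MLOCK
 * means the page is mlocked and should be left alone.
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:
 *              goto activate_locked;
 *      case SWAP_AGAIN:
 *              goto keep_locked;
 *      case SWAP_MLOCK:
 *              goto cull_mlocked;
 *      case SWAP_SUCCESS:
 *              ... all ptes are gone, proceed to pageout ...
 *      }
 */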

#endif  /* _LINUX_RMAP_H */