linux/include/linux/rmap.h
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for cleaning up the
         * anon_vma if it is the last user on release.
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used when deciding whether to reuse this
         * anon_vma instead of forking a new one. See the comments in
         * anon_vma_clone() for details.
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct rb_root rb_root; /* Interval tree of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;                      /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};
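
/*
 * Example (illustrative sketch, not a stable interface): all anon_vmas
 * attached to a vma are reachable through the same_vma list, roughly as
 * unlink_anon_vmas() in mm/rmap.c walks it:
 *
 *        struct anon_vma_chain *avc;
 *
 *        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *                visit(avc->anon_vma);
 *
 * Here visit() is a hypothetical helper standing in for real work; the
 * reverse direction, from an anon_vma to all vmas mapping it, goes
 * through the interval tree rooted at anon_vma->rb_root.
 */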

enum ttu_flags {
        TTU_UNMAP = 1,                  /* unmap mode */
        TTU_MIGRATION = 2,              /* migration mode */
        TTU_MUNLOCK = 4,                /* munlock mode */
        TTU_LZFREE = 8,                 /* lazy free mode */
        TTU_SPLIT_HUGE_PMD = 16,        /* split huge PMD if any */

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
        TTU_BATCH_FLUSH = (1 << 11),    /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED = (1 << 12)     /* do not grab rmap lock:
                                         * caller holds it */
};
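
/*
 * Example (illustrative sketch): callers combine one of the mode values
 * above with modifier bits; mm/vmscan.c, for instance, unmaps roughly as
 *
 *        switch (try_to_unmap(page, TTU_UNMAP | TTU_BATCH_FLUSH)) {
 *        case SWAP_FAIL:
 *                ...
 *        case SWAP_SUCCESS:
 *                ...
 *        }
 *
 * using the SWAP_* return values defined at the bottom of this file.
 */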

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}
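
/*
 * Example (illustrative sketch): note that all four helpers above take
 * anon_vma->root->rwsem, so one lock covers the whole anon_vma tree. A
 * reader walking the interval tree holds the lock around the walk, in
 * the style of rmap_walk_anon() in mm/rmap.c:
 *
 *        anon_vma_lock_read(anon_vma);
 *        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 *                ...
 *        }
 *        anon_vma_unlock_read(anon_vma);
 */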

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);
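
/*
 * Example (illustrative sketch): page_get_anon_vma() returns the page's
 * anon_vma with the refcount held, or NULL, so callers pair it with
 * put_anon_vma(), roughly as split_huge_page_to_list() does:
 *
 *        anon_vma = page_get_anon_vma(page);
 *        if (!anon_vma)
 *                return -EBUSY;
 *        anon_vma_lock_write(anon_vma);
 *        ...
 *        anon_vma_unlock_write(anon_vma);
 *        put_anon_vma(anon_vma);
 */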

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing the pte of a page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);
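
/*
 * Example (illustrative sketch): a fault handler instantiating a fresh
 * anonymous page wires up its rmap roughly as do_anonymous_page() in
 * mm/memory.c does:
 *
 *        page_add_new_anon_rmap(page, vma, address, false);
 *
 * where the final bool says whether the page is a compound (THP) page;
 * the matching teardown on unmap is page_remove_rmap(page, false).
 */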

static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *memcg, unsigned long *vm_flags);
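
/*
 * Example (illustrative sketch): reclaim checks for recent references
 * roughly as page_check_references() in mm/vmscan.c does:
 *
 *        unsigned long vm_flags;
 *        int referenced = page_referenced(page, 1, memcg, &vm_flags);
 *
 * On return, vm_flags accumulates the vm_flags of the vmas that
 * referenced the page (e.g. VM_LOCKED).
 */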

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);

/*
 * Used by uprobes to replace a userspace page safely
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                                unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}
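
/*
 * Example (illustrative sketch): on success the returned pte is mapped
 * and *ptlp is locked, so the caller must drop both, e.g.:
 *
 *        pte = page_check_address(page, mm, address, &ptl, 0);
 *        if (!pte)
 *                return SWAP_AGAIN;
 *        entry = *pte;
 *        pte_unmap_unlock(pte, ptl);
 */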

/*
 * Used by idle page tracking to check if a page was referenced via page
 * tables.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
                                  unsigned long address, pmd_t **pmdp,
                                  pte_t **ptep, spinlock_t **ptlp);
#else
static inline bool page_check_address_transhuge(struct page *page,
                                struct mm_struct *mm, unsigned long address,
                                pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
{
        *ptep = page_check_address(page, mm, address, ptlp, 0);
        *pmdp = NULL;
        return !!*ptep;
}
#endif
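
/*
 * Example (illustrative sketch): on success either *ptep (pte-mapped) or
 * *pmdp (pmd-mapped THP) is set and *ptlp is locked, in the style of
 * page_referenced_one() in mm/rmap.c:
 *
 *        if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
 *                return SWAP_AGAIN;
 *        if (pte) {
 *                ...
 *                pte_unmap(pte);
 *        }
 *        spin_unlock(ptl);
 */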

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: used to control the rmap walk for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where the page is mapped
 * done: checks whether the walk should terminate
 * anon_lock: takes the anon_vma lock in an optimized way rather than
 * the default (page_lock_anon_vma_read())
 * invalid_vma: skips vmas the walk is not interested in
 */
struct rmap_walk_control {
        void *arg;
        int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
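
/*
 * Example (illustrative sketch): a walker fills in the control struct
 * and visits every mapping of the page, roughly as page_mkclean() or
 * try_to_unmap() do:
 *
 *        struct rmap_walk_control rwc = {
 *                .rmap_one = my_rmap_one,
 *                .arg = &my_state,
 *        };
 *
 *        ret = rmap_walk(page, &rwc);
 *
 * my_rmap_one and my_state are hypothetical names; rmap_one() returns
 * SWAP_AGAIN to continue the walk, and any other value stops it.
 */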

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
#define SWAP_LZFREE     4

#endif  /* _LINUX_RMAP_H */