linux/include/linux/rmap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used for making a decision about reusing an
         * anon_vma instead of forking a new one. See the comments in
         * anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */

        /* Interval tree of private "related" vmas */
        struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;   /* locked by mmap_lock & page_table_lock */
        struct rb_node rb;                      /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
        TTU_SYNC                = 0x10, /* avoid racy checks with PVMW_SYNC */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED         = 0x80, /* do not grab rmap lock:
                                         * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

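/*
 * Illustrative sketch (not taken from this file): a caller that cannot
 * rely on the vma or its page tables staying around pins the anon_vma
 * with a reference and drops it when done. page_get_anon_vma() is
 * declared further down in this header; the "..." stands for
 * caller-specific work.
 *
 *      struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *      if (anon_vma) {
 *              ... use anon_vma without holding mmap_lock ...
 *              put_anon_vma(anon_vma);
 *      }
 */
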
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))
                return 0;

        return __anon_vma_prepare(vma);
}

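/*
 * Illustrative sketch (not taken from this file): a typical anonymous
 * fault path makes sure the vma has an anon_vma before it maps a
 * brand-new anonymous page. Page allocation and page table
 * manipulation are elided; only the rmap-related calls are shown.
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      ...
 *      page_add_new_anon_rmap(page, vma, address, false);
 */
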
static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *memcg, unsigned long *vm_flags);

void try_to_migrate(struct page *page, enum ttu_flags flags);
void try_to_unmap(struct page *, enum ttu_flags flags);

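/*
 * Illustrative sketch (not taken from this file): a reclaim-style
 * caller unmapping a page. try_to_unmap() returns void, so the caller
 * checks page_mapped() afterwards to see whether all ptes were
 * removed. The flag combination is only an example.
 *
 *      try_to_unmap(page, TTU_BATCH_FLUSH | TTU_IGNORE_MLOCK);
 *      if (page_mapped(page))
 *              ... some mappings remain, keep the page ...
 */
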
int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, struct page **pages,
                                void *arg);

/* Avoid racy checks */
#define PVMW_SYNC               (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION          (1 << 1)

struct page_vma_mapped_walk {
        struct page *page;
        struct vm_area_struct *vma;
        unsigned long address;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        unsigned int flags;
};

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
        /* A HugeTLB pte points at the page table entry directly and is
         * never pte_map()ed, so skip pte_unmap() for it. */
        if (pvmw->pte && !PageHuge(pvmw->page))
                pte_unmap(pvmw->pte);
        if (pvmw->ptl)
                spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

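/*
 * Illustrative sketch (not taken from this file): the usual
 * page_vma_mapped_walk() loop. Each successful iteration leaves
 * pvmw.pte (or pvmw.pmd for a PMD-mapped THP) pointing at one mapping
 * of the page, with pvmw.ptl held; breaking out early requires calling
 * page_vma_mapped_walk_done() to drop the lock. "give_up" is a
 * hypothetical caller-specific condition.
 *
 *      struct page_vma_mapped_walk pvmw = {
 *              .page = page,
 *              .vma = vma,
 *              .address = address,
 *              .flags = PVMW_SYNC,
 *      };
 *
 *      while (page_vma_mapped_walk(&pvmw)) {
 *              ... inspect or modify the entry at pvmw.pte ...
 *              if (give_up) {
 *                      page_vma_mapped_walk_done(&pvmw);
 *                      break;
 *              }
 *      }
 */
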
/*
 * Used by swapoff to help locate where a page is expected in a vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

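/*
 * Illustrative sketch (not taken from this file): writeback-side code
 * clears the dirty, writable ptes of a shared mapping and, if any pte
 * was cleaned, transfers that dirtiness to the struct page before the
 * page is written out. This mirrors the pattern used around
 * clear_page_dirty_for_io().
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 */
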
/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
void page_mlock(struct page *page);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

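/*
 * Illustrative sketch (not taken from this file): the lock-then-walk
 * pattern used by callers such as memory-failure. On success,
 * page_lock_anon_vma_read() returns the page's anon_vma with its rwsem
 * held for reading; the walk happens under that lock and
 * page_unlock_anon_vma_read() releases it.
 *
 *      struct anon_vma *av = page_lock_anon_vma_read(page);
 *
 *      if (av) {
 *              ... walk the vmas hanging off av under the read lock ...
 *              page_unlock_anon_vma_read(av);
 *      }
 */
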
/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking the termination condition of the traversal
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *            than the default
 * invalid_vma: for skipping vmas that are not of interest
 */
struct rmap_walk_control {
        void *arg;
        /*
         * Return false if page table scanning in rmap_walk should be stopped.
         * Otherwise, return true.
         */
        bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

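/*
 * Illustrative sketch (not taken from this file): a minimal rmap walk.
 * my_rmap_one() and my_cookie are hypothetical names; the callback is
 * invoked for every vma in which the page may be mapped and returns
 * false to stop the walk early, true to keep walking.
 *
 *      static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *                              unsigned long addr, void *arg)
 *      {
 *              struct my_cookie *cookie = arg;
 *
 *              ... examine the mapping of page at addr in vma ...
 *              return true;
 *      }
 *
 *      struct rmap_walk_control rwc = {
 *              .rmap_one = my_rmap_one,
 *              .arg = &cookie,
 *      };
 *
 *      rmap_walk(page, &rwc);
 */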

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
{
}

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

#endif  /* _LINUX_RMAP_H */