linux/mm/rmap.c
   1/*
   2 * mm/rmap.c - physical to virtual reverse mappings
   3 *
   4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
   5 * Released under the General Public License (GPL).
   6 *
   7 * Simple, low overhead reverse mapping scheme.
   8 * Please try to keep this thing as modular as possible.
   9 *
  10 * Provides methods for unmapping each kind of mapped page:
  11 * the anon methods track anonymous pages, and
  12 * the file methods track pages belonging to an inode.
  13 *
  14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17 * Contributions by Hugh Dickins 2003, 2004
  18 */
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_mutex       (while writing or truncating, not reading or faulting)
  24 *   inode->i_alloc_sem (vmtruncate_range)
  25 *   mm->mmap_sem
  26 *     page->flags PG_locked (lock_page)
  27 *       mapping->i_mmap_lock
  28 *         anon_vma->lock
  29 *           mm->page_table_lock or pte_lock
  30 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  31 *             swap_lock (in swap_duplicate, swap_info_get)
  32 *               mmlist_lock (in mmput, drain_mmlist and others)
  33 *               mapping->private_lock (in __set_page_dirty_buffers)
  34 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
  35 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
  36 *                 mapping->tree_lock (widely used, in set_page_dirty,
  37 *                           in arch-dependent flush_dcache_mmap_lock,
  38 *                           within inode_lock in __sync_single_inode)
  39 *
  40 * (code doesn't rely on that order so it could be switched around)
  41 * ->tasklist_lock
  42 *   anon_vma->lock      (memory_failure, collect_procs_anon)
  43 *     pte map lock
  44 */
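/*
 * As an example of this ordering within this file: page_referenced_file()
 * takes mapping->i_mmap_lock and then, via page_check_address(), the pte
 * lock; page_referenced_anon() likewise nests the pte lock inside
 * anon_vma->lock.
 */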
  45
  46#include <linux/mm.h>
  47#include <linux/pagemap.h>
  48#include <linux/swap.h>
  49#include <linux/swapops.h>
  50#include <linux/slab.h>
  51#include <linux/init.h>
  52#include <linux/rmap.h>
  53#include <linux/rcupdate.h>
  54#include <linux/module.h>
  55#include <linux/memcontrol.h>
  56#include <linux/mmu_notifier.h>
  57#include <linux/migrate.h>
  58
  59#include <asm/tlbflush.h>
  60
  61#include "internal.h"
  62
  63static struct kmem_cache *anon_vma_cachep;
  64
  65static inline struct anon_vma *anon_vma_alloc(void)
  66{
  67        return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  68}
  69
  70static inline void anon_vma_free(struct anon_vma *anon_vma)
  71{
  72        kmem_cache_free(anon_vma_cachep, anon_vma);
  73}
  74
  75/**
  76 * anon_vma_prepare - attach an anon_vma to a memory region
  77 * @vma: the memory region in question
  78 *
  79 * This makes sure the memory mapping described by 'vma' has
  80 * an 'anon_vma' attached to it, so that we can associate the
  81 * anonymous pages mapped into it with that anon_vma.
  82 *
  83 * The common case will be that we already have one, but if
   84 * not we either need to find an adjacent mapping that we
  85 * can re-use the anon_vma from (very common when the only
  86 * reason for splitting a vma has been mprotect()), or we
  87 * allocate a new one.
  88 *
  89 * Anon-vma allocations are very subtle, because we may have
  90 * optimistically looked up an anon_vma in page_lock_anon_vma()
  91 * and that may actually touch the spinlock even in the newly
  92 * allocated vma (it depends on RCU to make sure that the
  93 * anon_vma isn't actually destroyed).
  94 *
  95 * As a result, we need to do proper anon_vma locking even
  96 * for the new allocation. At the same time, we do not want
  97 * to do any locking for the common case of already having
  98 * an anon_vma.
  99 *
 100 * This must be called with the mmap_sem held for reading.
 101 */
 102int anon_vma_prepare(struct vm_area_struct *vma)
 103{
 104        struct anon_vma *anon_vma = vma->anon_vma;
 105
 106        might_sleep();
 107        if (unlikely(!anon_vma)) {
 108                struct mm_struct *mm = vma->vm_mm;
 109                struct anon_vma *allocated;
 110
 111                anon_vma = find_mergeable_anon_vma(vma);
 112                allocated = NULL;
 113                if (!anon_vma) {
 114                        anon_vma = anon_vma_alloc();
 115                        if (unlikely(!anon_vma))
 116                                return -ENOMEM;
 117                        allocated = anon_vma;
 118                }
 119                spin_lock(&anon_vma->lock);
 120
 121                /* page_table_lock to protect against threads */
 122                spin_lock(&mm->page_table_lock);
 123                if (likely(!vma->anon_vma)) {
 124                        vma->anon_vma = anon_vma;
 125                        list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 126                        allocated = NULL;
 127                }
 128                spin_unlock(&mm->page_table_lock);
 129
 130                spin_unlock(&anon_vma->lock);
 131                if (unlikely(allocated))
 132                        anon_vma_free(allocated);
 133        }
 134        return 0;
 135}
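/*
 * Illustrative caller sketch (not part of this file): an anonymous fault
 * handler typically does something like
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *
 * before allocating the new page and calling page_add_new_anon_rmap() on it.
 */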
 136
 137void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
 138{
 139        BUG_ON(vma->anon_vma != next->anon_vma);
 140        list_del(&next->anon_vma_node);
 141}
 142
 143void __anon_vma_link(struct vm_area_struct *vma)
 144{
 145        struct anon_vma *anon_vma = vma->anon_vma;
 146
 147        if (anon_vma)
 148                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 149}
 150
 151void anon_vma_link(struct vm_area_struct *vma)
 152{
 153        struct anon_vma *anon_vma = vma->anon_vma;
 154
 155        if (anon_vma) {
 156                spin_lock(&anon_vma->lock);
 157                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 158                spin_unlock(&anon_vma->lock);
 159        }
 160}
 161
 162void anon_vma_unlink(struct vm_area_struct *vma)
 163{
 164        struct anon_vma *anon_vma = vma->anon_vma;
 165        int empty;
 166
 167        if (!anon_vma)
 168                return;
 169
 170        spin_lock(&anon_vma->lock);
 171        list_del(&vma->anon_vma_node);
 172
 173        /* We must garbage collect the anon_vma if it's empty */
 174        empty = list_empty(&anon_vma->head);
 175        spin_unlock(&anon_vma->lock);
 176
 177        if (empty)
 178                anon_vma_free(anon_vma);
 179}
 180
 181static void anon_vma_ctor(void *data)
 182{
 183        struct anon_vma *anon_vma = data;
 184
 185        spin_lock_init(&anon_vma->lock);
 186        INIT_LIST_HEAD(&anon_vma->head);
 187}
 188
 189void __init anon_vma_init(void)
 190{
 191        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 192                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
 193}
 194
 195/*
 196 * Getting a lock on a stable anon_vma from a page off the LRU is
  197 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 198 */
 199struct anon_vma *page_lock_anon_vma(struct page *page)
 200{
 201        struct anon_vma *anon_vma;
 202        unsigned long anon_mapping;
 203
 204        rcu_read_lock();
 205        anon_mapping = (unsigned long) page->mapping;
 206        if (!(anon_mapping & PAGE_MAPPING_ANON))
 207                goto out;
 208        if (!page_mapped(page))
 209                goto out;
 210
 211        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 212        spin_lock(&anon_vma->lock);
 213        return anon_vma;
 214out:
 215        rcu_read_unlock();
 216        return NULL;
 217}
 218
 219void page_unlock_anon_vma(struct anon_vma *anon_vma)
 220{
 221        spin_unlock(&anon_vma->lock);
 222        rcu_read_unlock();
 223}
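/*
 * Every successful page_lock_anon_vma() must be paired with
 * page_unlock_anon_vma(), which drops both anon_vma->lock and the RCU read
 * lock taken above; see page_referenced_anon() and try_to_unmap_anon() below
 * for the usual pattern.
 */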
 224
 225/*
 226 * At what user virtual address is page expected in @vma?
 227 * Returns virtual address or -EFAULT if page's index/offset is not
  228 * within the range mapped by the @vma.
 229 */
 230static inline unsigned long
 231vma_address(struct page *page, struct vm_area_struct *vma)
 232{
 233        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 234        unsigned long address;
 235
 236        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 237        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
 238                /* page should be within @vma mapping range */
 239                return -EFAULT;
 240        }
 241        return address;
 242}
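/*
 * Worked example (hypothetical numbers): with 4K pages (PAGE_SHIFT == 12),
 * a page with index 10 mapped by a vma with vm_pgoff 4 and vm_start
 * 0x08000000 is expected at 0x08000000 + ((10 - 4) << 12) == 0x08006000.
 */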
 243
 244/*
 245 * At what user virtual address is page expected in vma?
  246 * Like vma_address(), but also checks that the page matches the vma.
 247 */
 248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 249{
 250        if (PageAnon(page)) {
 251                if ((void *)vma->anon_vma !=
 252                    (void *)page->mapping - PAGE_MAPPING_ANON)
 253                        return -EFAULT;
 254        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 255                if (!vma->vm_file ||
 256                    vma->vm_file->f_mapping != page->mapping)
 257                        return -EFAULT;
 258        } else
 259                return -EFAULT;
 260        return vma_address(page, vma);
 261}
 262
 263/*
 264 * Check that @page is mapped at @address into @mm.
 265 *
 266 * If @sync is false, page_check_address may perform a racy check to avoid
 267 * the page table lock when the pte is not present (helpful when reclaiming
 268 * highly shared pages).
 269 *
 270 * On success returns with pte mapped and locked.
 271 */
 272pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 273                          unsigned long address, spinlock_t **ptlp, int sync)
 274{
 275        pgd_t *pgd;
 276        pud_t *pud;
 277        pmd_t *pmd;
 278        pte_t *pte;
 279        spinlock_t *ptl;
 280
 281        pgd = pgd_offset(mm, address);
 282        if (!pgd_present(*pgd))
 283                return NULL;
 284
 285        pud = pud_offset(pgd, address);
 286        if (!pud_present(*pud))
 287                return NULL;
 288
 289        pmd = pmd_offset(pud, address);
 290        if (!pmd_present(*pmd))
 291                return NULL;
 292
 293        pte = pte_offset_map(pmd, address);
 294        /* Make a quick check before getting the lock */
 295        if (!sync && !pte_present(*pte)) {
 296                pte_unmap(pte);
 297                return NULL;
 298        }
 299
 300        ptl = pte_lockptr(mm, pmd);
 301        spin_lock(ptl);
 302        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 303                *ptlp = ptl;
 304                return pte;
 305        }
 306        pte_unmap_unlock(pte, ptl);
 307        return NULL;
 308}
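/*
 * A caller that gets a non-NULL pte back must drop the mapping and lock
 * again with pte_unmap_unlock(), passing the spinlock returned through
 * @ptlp, as page_mapped_in_vma() below does.
 */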
 309
 310/**
 311 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 312 * @page: the page to test
 313 * @vma: the VMA to test
 314 *
 315 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 316 * if the page is not mapped into the page tables of this VMA.  Only
 317 * valid for normal file or anonymous VMAs.
 318 */
 319int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 320{
 321        unsigned long address;
 322        pte_t *pte;
 323        spinlock_t *ptl;
 324
 325        address = vma_address(page, vma);
 326        if (address == -EFAULT)         /* out of vma range */
 327                return 0;
 328        pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 329        if (!pte)                       /* the page is not in this mm */
 330                return 0;
 331        pte_unmap_unlock(pte, ptl);
 332
 333        return 1;
 334}
 335
 336/*
 337 * Subfunctions of page_referenced: page_referenced_one called
 338 * repeatedly from either page_referenced_anon or page_referenced_file.
 339 */
 340static int page_referenced_one(struct page *page,
 341                               struct vm_area_struct *vma,
 342                               unsigned int *mapcount,
 343                               unsigned long *vm_flags)
 344{
 345        struct mm_struct *mm = vma->vm_mm;
 346        unsigned long address;
 347        pte_t *pte;
 348        spinlock_t *ptl;
 349        int referenced = 0;
 350
 351        address = vma_address(page, vma);
 352        if (address == -EFAULT)
 353                goto out;
 354
 355        pte = page_check_address(page, mm, address, &ptl, 0);
 356        if (!pte)
 357                goto out;
 358
 359        /*
  360         * Don't want to elevate referenced for a mlocked page that gets this
  361         * far, so that it progresses to try_to_unmap and is moved to the
  362         * unevictable list.
 363         */
 364        if (vma->vm_flags & VM_LOCKED) {
 365                *mapcount = 1;  /* break early from loop */
 366                *vm_flags |= VM_LOCKED;
 367                goto out_unmap;
 368        }
 369
 370        if (ptep_clear_flush_young_notify(vma, address, pte)) {
 371                /*
 372                 * Don't treat a reference through a sequentially read
 373                 * mapping as such.  If the page has been used in
 374                 * another mapping, we will catch it; if this other
 375                 * mapping is already gone, the unmap path will have
 376                 * set PG_referenced or activated the page.
 377                 */
 378                if (likely(!VM_SequentialReadHint(vma)))
 379                        referenced++;
 380        }
 381
 382        /* Pretend the page is referenced if the task has the
 383           swap token and is in the middle of a page fault. */
 384        if (mm != current->mm && has_swap_token(mm) &&
 385                        rwsem_is_locked(&mm->mmap_sem))
 386                referenced++;
 387
 388out_unmap:
 389        (*mapcount)--;
 390        pte_unmap_unlock(pte, ptl);
 391out:
 392        if (referenced)
 393                *vm_flags |= vma->vm_flags;
 394        return referenced;
 395}
 396
 397static int page_referenced_anon(struct page *page,
 398                                struct mem_cgroup *mem_cont,
 399                                unsigned long *vm_flags)
 400{
 401        unsigned int mapcount;
 402        struct anon_vma *anon_vma;
 403        struct vm_area_struct *vma;
 404        int referenced = 0;
 405
 406        anon_vma = page_lock_anon_vma(page);
 407        if (!anon_vma)
 408                return referenced;
 409
 410        mapcount = page_mapcount(page);
 411        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 412                /*
 413                 * If we are reclaiming on behalf of a cgroup, skip
 414                 * counting on behalf of references from different
 415                 * cgroups
 416                 */
 417                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 418                        continue;
 419                referenced += page_referenced_one(page, vma,
 420                                                  &mapcount, vm_flags);
 421                if (!mapcount)
 422                        break;
 423        }
 424
 425        page_unlock_anon_vma(anon_vma);
 426        return referenced;
 427}
 428
 429/**
 430 * page_referenced_file - referenced check for object-based rmap
 431 * @page: the page we're checking references on.
 432 * @mem_cont: target memory controller
  433 * @vm_flags: collect the vm_flags of vmas which actually referenced the page
 434 *
 435 * For an object-based mapped page, find all the places it is mapped and
 436 * check/clear the referenced flag.  This is done by following the page->mapping
 437 * pointer, then walking the chain of vmas it holds.  It returns the number
 438 * of references it found.
 439 *
 440 * This function is only called from page_referenced for object-based pages.
 441 */
 442static int page_referenced_file(struct page *page,
 443                                struct mem_cgroup *mem_cont,
 444                                unsigned long *vm_flags)
 445{
 446        unsigned int mapcount;
 447        struct address_space *mapping = page->mapping;
 448        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 449        struct vm_area_struct *vma;
 450        struct prio_tree_iter iter;
 451        int referenced = 0;
 452
 453        /*
 454         * The caller's checks on page->mapping and !PageAnon have made
 455         * sure that this is a file page: the check for page->mapping
 456         * excludes the case just before it gets set on an anon page.
 457         */
 458        BUG_ON(PageAnon(page));
 459
 460        /*
 461         * The page lock not only makes sure that page->mapping cannot
 462         * suddenly be NULLified by truncation, it makes sure that the
 463         * structure at mapping cannot be freed and reused yet,
 464         * so we can safely take mapping->i_mmap_lock.
 465         */
 466        BUG_ON(!PageLocked(page));
 467
 468        spin_lock(&mapping->i_mmap_lock);
 469
 470        /*
 471         * i_mmap_lock does not stabilize mapcount at all, but mapcount
 472         * is more likely to be accurate if we note it after spinning.
 473         */
 474        mapcount = page_mapcount(page);
 475
 476        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 477                /*
 478                 * If we are reclaiming on behalf of a cgroup, skip
 479                 * counting on behalf of references from different
 480                 * cgroups
 481                 */
 482                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 483                        continue;
 484                referenced += page_referenced_one(page, vma,
 485                                                  &mapcount, vm_flags);
 486                if (!mapcount)
 487                        break;
 488        }
 489
 490        spin_unlock(&mapping->i_mmap_lock);
 491        return referenced;
 492}
 493
 494/**
 495 * page_referenced - test if the page was referenced
 496 * @page: the page to test
 497 * @is_locked: caller holds lock on the page
 498 * @mem_cont: target memory controller
  499 * @vm_flags: collect the vm_flags of vmas which actually referenced the page
 500 *
 501 * Quick test_and_clear_referenced for all mappings to a page,
 502 * returns the number of ptes which referenced the page.
 503 */
 504int page_referenced(struct page *page,
 505                    int is_locked,
 506                    struct mem_cgroup *mem_cont,
 507                    unsigned long *vm_flags)
 508{
 509        int referenced = 0;
 510
 511        if (TestClearPageReferenced(page))
 512                referenced++;
 513
 514        *vm_flags = 0;
 515        if (page_mapped(page) && page->mapping) {
 516                if (PageAnon(page))
 517                        referenced += page_referenced_anon(page, mem_cont,
 518                                                                vm_flags);
 519                else if (is_locked)
 520                        referenced += page_referenced_file(page, mem_cont,
 521                                                                vm_flags);
 522                else if (!trylock_page(page))
 523                        referenced++;
 524                else {
 525                        if (page->mapping)
 526                                referenced += page_referenced_file(page,
 527                                                        mem_cont, vm_flags);
 528                        unlock_page(page);
 529                }
 530        }
 531
 532        if (page_test_and_clear_young(page))
 533                referenced++;
 534
 535        return referenced;
 536}
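/*
 * Illustrative caller sketch (not part of this file): with the page locked,
 * reclaim can do something like
 *
 *	unsigned long vm_flags;
 *	int referenced = page_referenced(page, 1, NULL, &vm_flags);
 *
 * and then keep the page on the LRU while referenced is non-zero, or treat
 * it as unevictable if vm_flags came back with VM_LOCKED set.  A NULL
 * memory controller means the count is not restricted to one cgroup.
 */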
 537
 538static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 539{
 540        struct mm_struct *mm = vma->vm_mm;
 541        unsigned long address;
 542        pte_t *pte;
 543        spinlock_t *ptl;
 544        int ret = 0;
 545
 546        address = vma_address(page, vma);
 547        if (address == -EFAULT)
 548                goto out;
 549
 550        pte = page_check_address(page, mm, address, &ptl, 1);
 551        if (!pte)
 552                goto out;
 553
 554        if (pte_dirty(*pte) || pte_write(*pte)) {
 555                pte_t entry;
 556
 557                flush_cache_page(vma, address, pte_pfn(*pte));
 558                entry = ptep_clear_flush_notify(vma, address, pte);
 559                entry = pte_wrprotect(entry);
 560                entry = pte_mkclean(entry);
 561                set_pte_at(mm, address, pte, entry);
 562                ret = 1;
 563        }
 564
 565        pte_unmap_unlock(pte, ptl);
 566out:
 567        return ret;
 568}
 569
 570static int page_mkclean_file(struct address_space *mapping, struct page *page)
 571{
 572        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 573        struct vm_area_struct *vma;
 574        struct prio_tree_iter iter;
 575        int ret = 0;
 576
 577        BUG_ON(PageAnon(page));
 578
 579        spin_lock(&mapping->i_mmap_lock);
 580        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 581                if (vma->vm_flags & VM_SHARED)
 582                        ret += page_mkclean_one(page, vma);
 583        }
 584        spin_unlock(&mapping->i_mmap_lock);
 585        return ret;
 586}
 587
 588int page_mkclean(struct page *page)
 589{
 590        int ret = 0;
 591
 592        BUG_ON(!PageLocked(page));
 593
 594        if (page_mapped(page)) {
 595                struct address_space *mapping = page_mapping(page);
 596                if (mapping) {
 597                        ret = page_mkclean_file(mapping, page);
 598                        if (page_test_dirty(page)) {
 599                                page_clear_dirty(page);
 600                                ret = 1;
 601                        }
 602                }
 603        }
 604
 605        return ret;
 606}
 607EXPORT_SYMBOL_GPL(page_mkclean);
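/*
 * Illustrative caller sketch (not part of this file): before starting
 * writeback, the dirty-page path wants all ptes clean and write-protected,
 * roughly
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *
 * so the dirty state moves to the struct page and any later store takes a
 * write fault that redirties the page.
 */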
 608
 609/**
 610 * __page_set_anon_rmap - setup new anonymous rmap
 611 * @page:       the page to add the mapping to
 612 * @vma:        the vm area in which the mapping is added
 613 * @address:    the user virtual address mapped
 614 */
 615static void __page_set_anon_rmap(struct page *page,
 616        struct vm_area_struct *vma, unsigned long address)
 617{
 618        struct anon_vma *anon_vma = vma->anon_vma;
 619
 620        BUG_ON(!anon_vma);
 621        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 622        page->mapping = (struct address_space *) anon_vma;
 623
 624        page->index = linear_page_index(vma, address);
 625
 626        /*
 627         * nr_mapped state can be updated without turning off
 628         * interrupts because it is not modified via interrupt.
 629         */
 630        __inc_zone_page_state(page, NR_ANON_PAGES);
 631}
 632
 633/**
 634 * __page_check_anon_rmap - sanity check anonymous rmap addition
 635 * @page:       the page to add the mapping to
 636 * @vma:        the vm area in which the mapping is added
 637 * @address:    the user virtual address mapped
 638 */
 639static void __page_check_anon_rmap(struct page *page,
 640        struct vm_area_struct *vma, unsigned long address)
 641{
 642#ifdef CONFIG_DEBUG_VM
 643        /*
 644         * The page's anon-rmap details (mapping and index) are guaranteed to
 645         * be set up correctly at this point.
 646         *
 647         * We have exclusion against page_add_anon_rmap because the caller
 648         * always holds the page locked, except if called from page_dup_rmap,
 649         * in which case the page is already known to be setup.
 650         *
 651         * We have exclusion against page_add_new_anon_rmap because those pages
 652         * are initially only visible via the pagetables, and the pte is locked
 653         * over the call to page_add_new_anon_rmap.
 654         */
 655        struct anon_vma *anon_vma = vma->anon_vma;
 656        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 657        BUG_ON(page->mapping != (struct address_space *)anon_vma);
 658        BUG_ON(page->index != linear_page_index(vma, address));
 659#endif
 660}
 661
 662/**
 663 * page_add_anon_rmap - add pte mapping to an anonymous page
 664 * @page:       the page to add the mapping to
 665 * @vma:        the vm area in which the mapping is added
 666 * @address:    the user virtual address mapped
 667 *
 668 * The caller needs to hold the pte lock and the page must be locked.
 669 */
 670void page_add_anon_rmap(struct page *page,
 671        struct vm_area_struct *vma, unsigned long address)
 672{
 673        VM_BUG_ON(!PageLocked(page));
 674        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 675        if (atomic_inc_and_test(&page->_mapcount))
 676                __page_set_anon_rmap(page, vma, address);
 677        else
 678                __page_check_anon_rmap(page, vma, address);
 679}
 680
 681/**
 682 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 683 * @page:       the page to add the mapping to
 684 * @vma:        the vm area in which the mapping is added
 685 * @address:    the user virtual address mapped
 686 *
 687 * Same as page_add_anon_rmap but must only be called on *new* pages.
 688 * This means the inc-and-test can be bypassed.
 689 * Page does not have to be locked.
 690 */
 691void page_add_new_anon_rmap(struct page *page,
 692        struct vm_area_struct *vma, unsigned long address)
 693{
 694        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 695        SetPageSwapBacked(page);
 696        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 697        __page_set_anon_rmap(page, vma, address);
 698        if (page_evictable(page, vma))
 699                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 700        else
 701                add_page_to_unevictable_list(page);
 702}
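/*
 * Illustrative caller sketch (not part of this file): an anonymous fault
 * handler, holding the pte lock, typically finishes with
 *
 *	inc_mm_counter(mm, anon_rss);
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 *
 * where the new page is not yet visible to any other path.
 */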
 703
 704/**
 705 * page_add_file_rmap - add pte mapping to a file page
 706 * @page: the page to add the mapping to
 707 *
 708 * The caller needs to hold the pte lock.
 709 */
 710void page_add_file_rmap(struct page *page)
 711{
 712        if (atomic_inc_and_test(&page->_mapcount)) {
 713                __inc_zone_page_state(page, NR_FILE_MAPPED);
 714                mem_cgroup_update_mapped_file_stat(page, 1);
 715        }
 716}
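/*
 * Illustrative caller sketch (not part of this file): the file fault path
 * does the equivalent of
 *
 *	inc_mm_counter(mm, file_rss);
 *	page_add_file_rmap(page);
 *	set_pte_at(mm, address, page_table, entry);
 *
 * under the pte lock, mirroring the anonymous case above.
 */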
 717
 718/**
 719 * page_remove_rmap - take down pte mapping from a page
 720 * @page: page to remove mapping from
 721 *
 722 * The caller needs to hold the pte lock.
 723 */
 724void page_remove_rmap(struct page *page)
 725{
 726        /* page still mapped by someone else? */
 727        if (!atomic_add_negative(-1, &page->_mapcount))
 728                return;
 729
 730        /*
 731         * Now that the last pte has gone, s390 must transfer dirty
 732         * flag from storage key to struct page.  We can usually skip
 733         * this if the page is anon, so about to be freed; but perhaps
 734         * not if it's in swapcache - there might be another pte slot
 735         * containing the swap entry, but page not yet written to swap.
 736         */
 737        if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
 738                page_clear_dirty(page);
 739                set_page_dirty(page);
 740        }
 741        if (PageAnon(page)) {
 742                mem_cgroup_uncharge_page(page);
 743                __dec_zone_page_state(page, NR_ANON_PAGES);
 744        } else {
 745                __dec_zone_page_state(page, NR_FILE_MAPPED);
 746        }
 747        mem_cgroup_update_mapped_file_stat(page, -1);
 748        /*
 749         * It would be tidy to reset the PageAnon mapping here,
 750         * but that might overwrite a racing page_add_anon_rmap
 751         * which increments mapcount after us but sets mapping
 752         * before us: so leave the reset to free_hot_cold_page,
 753         * and remember that it's only reliable while mapped.
 754         * Leaving it set also helps swapoff to reinstate ptes
 755         * faster for those pages still in swapcache.
 756         */
 757}
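/*
 * A typical teardown sequence, as in try_to_unmap_one() below, clears the
 * pte and then, still under the pte lock, does
 *
 *	page_remove_rmap(page);
 *	page_cache_release(page);
 *
 * so the rmap accounting stays in step with the page tables.
 */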
 758
 759/*
 760 * Subfunctions of try_to_unmap: try_to_unmap_one called
 761 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 762 */
 763static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 764                                enum ttu_flags flags)
 765{
 766        struct mm_struct *mm = vma->vm_mm;
 767        unsigned long address;
 768        pte_t *pte;
 769        pte_t pteval;
 770        spinlock_t *ptl;
 771        int ret = SWAP_AGAIN;
 772
 773        address = vma_address(page, vma);
 774        if (address == -EFAULT)
 775                goto out;
 776
 777        pte = page_check_address(page, mm, address, &ptl, 0);
 778        if (!pte)
 779                goto out;
 780
 781        /*
 782         * If the page is mlock()d, we cannot swap it out.
 783         * If it's recently referenced (perhaps page_referenced
 784         * skipped over this mm) then we should reactivate it.
 785         */
 786        if (!(flags & TTU_IGNORE_MLOCK)) {
 787                if (vma->vm_flags & VM_LOCKED) {
 788                        ret = SWAP_MLOCK;
 789                        goto out_unmap;
 790                }
 791        }
 792        if (!(flags & TTU_IGNORE_ACCESS)) {
 793                if (ptep_clear_flush_young_notify(vma, address, pte)) {
 794                        ret = SWAP_FAIL;
 795                        goto out_unmap;
 796                }
 797        }
 798
 799        /* Nuke the page table entry. */
 800        flush_cache_page(vma, address, page_to_pfn(page));
 801        pteval = ptep_clear_flush_notify(vma, address, pte);
 802
 803        /* Move the dirty bit to the physical page now the pte is gone. */
 804        if (pte_dirty(pteval))
 805                set_page_dirty(page);
 806
 807        /* Update high watermark before we lower rss */
 808        update_hiwater_rss(mm);
 809
 810        if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
 811                if (PageAnon(page))
 812                        dec_mm_counter(mm, anon_rss);
 813                else
 814                        dec_mm_counter(mm, file_rss);
 815                set_pte_at(mm, address, pte,
 816                                swp_entry_to_pte(make_hwpoison_entry(page)));
 817        } else if (PageAnon(page)) {
 818                swp_entry_t entry = { .val = page_private(page) };
 819
 820                if (PageSwapCache(page)) {
 821                        /*
 822                         * Store the swap location in the pte.
 823                         * See handle_pte_fault() ...
 824                         */
 825                        swap_duplicate(entry);
 826                        if (list_empty(&mm->mmlist)) {
 827                                spin_lock(&mmlist_lock);
 828                                if (list_empty(&mm->mmlist))
 829                                        list_add(&mm->mmlist, &init_mm.mmlist);
 830                                spin_unlock(&mmlist_lock);
 831                        }
 832                        dec_mm_counter(mm, anon_rss);
 833                } else if (PAGE_MIGRATION) {
 834                        /*
 835                         * Store the pfn of the page in a special migration
 836                         * pte. do_swap_page() will wait until the migration
 837                         * pte is removed and then restart fault handling.
 838                         */
 839                        BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 840                        entry = make_migration_entry(page, pte_write(pteval));
 841                }
 842                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 843                BUG_ON(pte_file(*pte));
 844        } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
 845                /* Establish migration entry for a file page */
 846                swp_entry_t entry;
 847                entry = make_migration_entry(page, pte_write(pteval));
 848                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 849        } else
 850                dec_mm_counter(mm, file_rss);
 851
 852
 853        page_remove_rmap(page);
 854        page_cache_release(page);
 855
 856out_unmap:
 857        pte_unmap_unlock(pte, ptl);
 858out:
 859        return ret;
 860}
 861
 862/*
 863 * objrmap doesn't work for nonlinear VMAs because the assumption that
 864 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 865 * Consequently, given a particular page and its ->index, we cannot locate the
 866 * ptes which are mapping that page without an exhaustive linear search.
 867 *
 868 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 869 * maps the file to which the target page belongs.  The ->vm_private_data field
 870 * holds the current cursor into that scan.  Successive searches will circulate
 871 * around the vma's virtual address space.
 872 *
 873 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 874 * more scanning pressure is placed against them as well.   Eventually pages
 875 * will become fully unmapped and become eligible for eviction.
 876 *
 877 * For very sparsely populated VMAs this is a little inefficient - chances are
 878 * there won't be many ptes located within the scan cluster.  In this case
 879 * maybe we could scan further - to the end of the pte page, perhaps.
 880 *
 881 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 882 * acquire it without blocking.  If the vma is locked, mlock the pages in the cluster,
 883 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 884 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 885 */
 886#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
 887#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
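/*
 * With 4K pages and a 2M PMD, for example, CLUSTER_SIZE is 32 * 4K = 128K,
 * so each try_to_unmap_cluster() call below scans at most 32 ptes of a
 * single pte page.
 */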
 888
 889static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 890                struct vm_area_struct *vma, struct page *check_page)
 891{
 892        struct mm_struct *mm = vma->vm_mm;
 893        pgd_t *pgd;
 894        pud_t *pud;
 895        pmd_t *pmd;
 896        pte_t *pte;
 897        pte_t pteval;
 898        spinlock_t *ptl;
 899        struct page *page;
 900        unsigned long address;
 901        unsigned long end;
 902        int ret = SWAP_AGAIN;
 903        int locked_vma = 0;
 904
 905        address = (vma->vm_start + cursor) & CLUSTER_MASK;
 906        end = address + CLUSTER_SIZE;
 907        if (address < vma->vm_start)
 908                address = vma->vm_start;
 909        if (end > vma->vm_end)
 910                end = vma->vm_end;
 911
 912        pgd = pgd_offset(mm, address);
 913        if (!pgd_present(*pgd))
 914                return ret;
 915
 916        pud = pud_offset(pgd, address);
 917        if (!pud_present(*pud))
 918                return ret;
 919
 920        pmd = pmd_offset(pud, address);
 921        if (!pmd_present(*pmd))
 922                return ret;
 923
 924        /*
 925         * MLOCK_PAGES => feature is configured.
 926         * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
 927         * keep the sem while scanning the cluster for mlocking pages.
 928         */
 929        if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
 930                locked_vma = (vma->vm_flags & VM_LOCKED);
 931                if (!locked_vma)
 932                        up_read(&vma->vm_mm->mmap_sem); /* don't need it */
 933        }
 934
 935        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 936
 937        /* Update high watermark before we lower rss */
 938        update_hiwater_rss(mm);
 939
 940        for (; address < end; pte++, address += PAGE_SIZE) {
 941                if (!pte_present(*pte))
 942                        continue;
 943                page = vm_normal_page(vma, address, *pte);
 944                BUG_ON(!page || PageAnon(page));
 945
 946                if (locked_vma) {
 947                        mlock_vma_page(page);   /* no-op if already mlocked */
 948                        if (page == check_page)
 949                                ret = SWAP_MLOCK;
 950                        continue;       /* don't unmap */
 951                }
 952
 953                if (ptep_clear_flush_young_notify(vma, address, pte))
 954                        continue;
 955
 956                /* Nuke the page table entry. */
 957                flush_cache_page(vma, address, pte_pfn(*pte));
 958                pteval = ptep_clear_flush_notify(vma, address, pte);
 959
 960                /* If nonlinear, store the file page offset in the pte. */
 961                if (page->index != linear_page_index(vma, address))
 962                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
 963
 964                /* Move the dirty bit to the physical page now the pte is gone. */
 965                if (pte_dirty(pteval))
 966                        set_page_dirty(page);
 967
 968                page_remove_rmap(page);
 969                page_cache_release(page);
 970                dec_mm_counter(mm, file_rss);
 971                (*mapcount)--;
 972        }
 973        pte_unmap_unlock(pte - 1, ptl);
 974        if (locked_vma)
 975                up_read(&vma->vm_mm->mmap_sem);
 976        return ret;
 977}
 978
 979/*
 980 * common handling for pages mapped in VM_LOCKED vmas
 981 */
 982static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
 983{
 984        int mlocked = 0;
 985
 986        if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 987                if (vma->vm_flags & VM_LOCKED) {
 988                        mlock_vma_page(page);
 989                        mlocked++;      /* really mlocked the page */
 990                }
 991                up_read(&vma->vm_mm->mmap_sem);
 992        }
 993        return mlocked;
 994}
 995
 996/**
  997 * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
  998 * rmap method
  999 * @page: the page to unmap/unlock
 1000 * @flags: action and flags (TTU_MUNLOCK requests unlock rather than unmap,
 1001 *         TTU_MIGRATION requests unmapping for migration)
1002 *
1003 * Find all the mappings of a page using the mapping pointer and the vma chains
1004 * contained in the anon_vma struct it points to.
1005 *
1006 * This function is only called from try_to_unmap/try_to_munlock for
1007 * anonymous pages.
1008 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1009 * where the page was found will be held for write.  So, we won't recheck
1010 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 1011 * VM_LOCKED.
1012 */
1013static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1014{
1015        struct anon_vma *anon_vma;
1016        struct vm_area_struct *vma;
1017        unsigned int mlocked = 0;
1018        int ret = SWAP_AGAIN;
1019        int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
1020
1021        if (MLOCK_PAGES && unlikely(unlock))
1022                ret = SWAP_SUCCESS;     /* default for try_to_munlock() */
1023
1024        anon_vma = page_lock_anon_vma(page);
1025        if (!anon_vma)
1026                return ret;
1027
1028        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1029                if (MLOCK_PAGES && unlikely(unlock)) {
1030                        if (!((vma->vm_flags & VM_LOCKED) &&
1031                              page_mapped_in_vma(page, vma)))
1032                                continue;  /* must visit all unlocked vmas */
1033                        ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
1034                } else {
1035                        ret = try_to_unmap_one(page, vma, flags);
1036                        if (ret == SWAP_FAIL || !page_mapped(page))
1037                                break;
1038                }
1039                if (ret == SWAP_MLOCK) {
1040                        mlocked = try_to_mlock_page(page, vma);
1041                        if (mlocked)
1042                                break;  /* stop if actually mlocked page */
1043                }
1044        }
1045
1046        page_unlock_anon_vma(anon_vma);
1047
1048        if (mlocked)
1049                ret = SWAP_MLOCK;       /* actually mlocked the page */
1050        else if (ret == SWAP_MLOCK)
1051                ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */
1052
1053        return ret;
1054}
1055
1056/**
1057 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
1058 * @page: the page to unmap/unlock
1059 * @flags: action and flags
1060 *
1061 * Find all the mappings of a page using the mapping pointer and the vma chains
1062 * contained in the address_space struct it points to.
1063 *
1064 * This function is only called from try_to_unmap/try_to_munlock for
1065 * object-based pages.
1066 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1067 * where the page was found will be held for write.  So, we won't recheck
1068 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 1069 * VM_LOCKED.
1070 */
1071static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1072{
1073        struct address_space *mapping = page->mapping;
1074        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1075        struct vm_area_struct *vma;
1076        struct prio_tree_iter iter;
1077        int ret = SWAP_AGAIN;
1078        unsigned long cursor;
1079        unsigned long max_nl_cursor = 0;
1080        unsigned long max_nl_size = 0;
1081        unsigned int mapcount;
1082        unsigned int mlocked = 0;
1083        int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
1084
1085        if (MLOCK_PAGES && unlikely(unlock))
1086                ret = SWAP_SUCCESS;     /* default for try_to_munlock() */
1087
1088        spin_lock(&mapping->i_mmap_lock);
1089        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1090                if (MLOCK_PAGES && unlikely(unlock)) {
1091                        if (!((vma->vm_flags & VM_LOCKED) &&
1092                                                page_mapped_in_vma(page, vma)))
1093                                continue;       /* must visit all vmas */
1094                        ret = SWAP_MLOCK;
1095                } else {
1096                        ret = try_to_unmap_one(page, vma, flags);
1097                        if (ret == SWAP_FAIL || !page_mapped(page))
1098                                goto out;
1099                }
1100                if (ret == SWAP_MLOCK) {
1101                        mlocked = try_to_mlock_page(page, vma);
1102                        if (mlocked)
1103                                break;  /* stop if actually mlocked page */
1104                }
1105        }
1106
1107        if (mlocked)
1108                goto out;
1109
1110        if (list_empty(&mapping->i_mmap_nonlinear))
1111                goto out;
1112
1113        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1114                                                shared.vm_set.list) {
1115                if (MLOCK_PAGES && unlikely(unlock)) {
1116                        if (!(vma->vm_flags & VM_LOCKED))
1117                                continue;       /* must visit all vmas */
1118                        ret = SWAP_MLOCK;       /* leave mlocked == 0 */
1119                        goto out;               /* no need to look further */
1120                }
1121                if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
1122                        (vma->vm_flags & VM_LOCKED))
1123                        continue;
1124                cursor = (unsigned long) vma->vm_private_data;
1125                if (cursor > max_nl_cursor)
1126                        max_nl_cursor = cursor;
1127                cursor = vma->vm_end - vma->vm_start;
1128                if (cursor > max_nl_size)
1129                        max_nl_size = cursor;
1130        }
1131
1132        if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
1133                ret = SWAP_FAIL;
1134                goto out;
1135        }
1136
1137        /*
1138         * We don't try to search for this page in the nonlinear vmas,
1139         * and page_referenced wouldn't have found it anyway.  Instead
1140         * just walk the nonlinear vmas trying to age and unmap some.
1141         * The mapcount of the page we came in with is irrelevant,
1142         * but even so use it as a guide to how hard we should try?
1143         */
1144        mapcount = page_mapcount(page);
1145        if (!mapcount)
1146                goto out;
1147        cond_resched_lock(&mapping->i_mmap_lock);
1148
1149        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1150        if (max_nl_cursor == 0)
1151                max_nl_cursor = CLUSTER_SIZE;
1152
1153        do {
1154                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1155                                                shared.vm_set.list) {
1156                        if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
1157                            (vma->vm_flags & VM_LOCKED))
1158                                continue;
1159                        cursor = (unsigned long) vma->vm_private_data;
1160                        while ( cursor < max_nl_cursor &&
1161                                cursor < vma->vm_end - vma->vm_start) {
1162                                ret = try_to_unmap_cluster(cursor, &mapcount,
1163                                                                vma, page);
1164                                if (ret == SWAP_MLOCK)
1165                                        mlocked = 2;    /* to return below */
1166                                cursor += CLUSTER_SIZE;
1167                                vma->vm_private_data = (void *) cursor;
1168                                if ((int)mapcount <= 0)
1169                                        goto out;
1170                        }
1171                        vma->vm_private_data = (void *) max_nl_cursor;
1172                }
1173                cond_resched_lock(&mapping->i_mmap_lock);
1174                max_nl_cursor += CLUSTER_SIZE;
1175        } while (max_nl_cursor <= max_nl_size);
1176
1177        /*
1178         * Don't loop forever (perhaps all the remaining pages are
1179         * in locked vmas).  Reset cursor on all unreserved nonlinear
1180         * vmas, now forgetting on which ones it had fallen behind.
1181         */
1182        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1183                vma->vm_private_data = NULL;
1184out:
1185        spin_unlock(&mapping->i_mmap_lock);
1186        if (mlocked)
1187                ret = SWAP_MLOCK;       /* actually mlocked the page */
1188        else if (ret == SWAP_MLOCK)
1189                ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */
1190        return ret;
1191}
1192
1193/**
1194 * try_to_unmap - try to remove all page table mappings to a page
1195 * @page: the page to get unmapped
1196 * @flags: action and flags
1197 *
1198 * Tries to remove all the page table entries which are mapping this
1199 * page, used in the pageout path.  Caller must hold the page lock.
1200 * Return values are:
1201 *
1202 * SWAP_SUCCESS - we succeeded in removing all mappings
1203 * SWAP_AGAIN   - we missed a mapping, try again later
1204 * SWAP_FAIL    - the page is unswappable
1205 * SWAP_MLOCK   - page is mlocked.
1206 */
1207int try_to_unmap(struct page *page, enum ttu_flags flags)
1208{
1209        int ret;
1210
1211        BUG_ON(!PageLocked(page));
1212
1213        if (PageAnon(page))
1214                ret = try_to_unmap_anon(page, flags);
1215        else
1216                ret = try_to_unmap_file(page, flags);
1217        if (ret != SWAP_MLOCK && !page_mapped(page))
1218                ret = SWAP_SUCCESS;
1219        return ret;
1220}
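/*
 * Illustrative caller sketch (not part of this file): page reclaim
 * dispatches on the return value roughly as
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		activate the page again
 *	case SWAP_AGAIN:
 *		keep the page and retry later
 *	case SWAP_MLOCK:
 *		move the page to the unevictable list
 *	case SWAP_SUCCESS:
 *		go on to free the page
 *	}
 */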
1221
1222/**
1223 * try_to_munlock - try to munlock a page
1224 * @page: the page to be munlocked
1225 *
1226 * Called from munlock code.  Checks all of the VMAs mapping the page
1227 * to make sure nobody else has this page mlocked. The page will be
1228 * returned with PG_mlocked cleared if no other vmas have it mlocked.
1229 *
1230 * Return values are:
1231 *
1232 * SWAP_SUCCESS - no vma's holding page mlocked.
1233 * SWAP_AGAIN   - page mapped in mlocked vma -- couldn't acquire mmap sem
1234 * SWAP_MLOCK   - page is now mlocked.
1235 */
1236int try_to_munlock(struct page *page)
1237{
1238        VM_BUG_ON(!PageLocked(page) || PageLRU(page));
1239
1240        if (PageAnon(page))
1241                return try_to_unmap_anon(page, TTU_MUNLOCK);
1242        else
1243                return try_to_unmap_file(page, TTU_MUNLOCK);
1244}
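/*
 * Illustrative caller sketch (not part of this file): the munlock path
 * roughly does
 *
 *	if (!isolate_lru_page(page)) {
 *		ret = try_to_munlock(page);
 *		putback_lru_page(page);
 *	}
 *
 * so a page still mapped by some other VM_LOCKED vma (SWAP_MLOCK) ends up
 * back on the unevictable list, while an unlocked page can return to a
 * normal LRU list.
 */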
1245
1246