linux/mm/swap_state.c
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
        }
};
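/*
 * swap_address_space(), defined in include/linux/swap.h, maps a swap
 * entry to one of the address_spaces above; in this kernel it is
 * expected to reduce to roughly:
 *
 *      #define swap_address_space(entry) \
 *              (&swapper_spaces[swp_type(entry)])
 *
 * so the swap type selects the per-area address_space, while the full
 * entry value (entry.val) is used as the radix-tree index below.
 */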

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
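/*
 * Illustrative output only (the numbers are made up), following the
 * printk formats above:
 *
 *      1024 pages in swap cache
 *      Swap cache stats: add 4096, delete 3072, find 2048/8192
 *      Free swap  = 1048576kB
 *      Total swap = 2097152kB
 */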

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                        entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * calls add_to_swap_cache(), so add_to_swap_cache()
                 * never returns -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                put_page(page);
        }

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
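/*
 * A sketch of the expected calling pattern (compare add_to_swap() and
 * __read_swap_cache_async() later in this file):
 *
 *      err = swapcache_prepare(entry);         // or get_swap_page(), which
 *                                              // hands back SWAP_HAS_CACHE
 *      if (!err) {
 *              err = add_to_swap_cache(page, entry, gfp);
 *              if (err)                        // -ENOMEM only, never -EEXIST
 *                      swapcache_free(entry);  // drop SWAP_HAS_CACHE again
 *      }
 *
 * i.e. the caller must already own the SWAP_HAS_CACHE reference on the
 * swap slot before the page is inserted into the radix tree here.
 */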

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to pass to split_huge_page_to_list() if @page is a
 *        transparent huge page and has to be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry)) {
                swapcache_free(entry);
                return 0;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}
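/*
 * For context: the main caller is vmscan's shrink_page_list(), which
 * (roughly, details vary by kernel version) does something like:
 *
 *      if (PageAnon(page) && !PageSwapCache(page)) {
 *              if (!(sc->gfp_mask & __GFP_IO))
 *                      goto keep_locked;
 *              if (!add_to_swap(page, page_list))
 *                      goto activate_locked;
 *              ...
 *      }
 *
 * so a 0 return simply makes reclaim skip (re-activate) the page rather
 * than treating it as an error.
 */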

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}
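/*
 * Note: the TestClearPageReadahead() above is how swap readahead gets
 * its feedback.  Pages brought in speculatively by swapin_readahead()
 * are tagged PageReadahead; each time one of them is actually found
 * here, swapin_readahead_hits is bumped, and swapin_nr_pages() below
 * uses that count to grow or shrink the next readahead window.
 */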

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}
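/*
 * In short, the loop above implements this sequence (sketch):
 *
 *      1. look the entry up in the swap cache; done if already there
 *      2. allocate a page (once) to read into
 *      3. preload the radix tree while sleeping is still allowed
 *      4. swapcache_prepare() to claim SWAP_HAS_CACHE on the slot;
 *         -EEXIST means someone else is installing the page, so retry
 *      5. __add_to_swap_cache(); on success return the new page locked
 *         so the caller can start the read, otherwise undo and retry
 *         unless the failure was -ENOMEM
 */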

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
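/*
 * Worked example (illustrative values): with page_cluster == 3,
 * max_pages == 8.
 *
 *  - 5 readahead hits since the last swapin: pages = 5 + 2 = 7, rounded
 *    up to the next power of two (minimum 4) = 8, capped at 8 -> 8.
 *  - no hits and a non-adjacent offset: pages = 1, but if the previous
 *    window was 8 the "don't shrink too fast" clamp keeps it at 4, then
 *    2, then 1 on successive misses, halving the window each time.
 */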

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
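/*
 * Worked example (illustrative values): for a fault at swap offset 100
 * with swapin_nr_pages() returning 8, mask = 7, so start_offset = 96
 * and end_offset = 103.  Offsets 96..103 are all queued for read under
 * one block plug; every page except the faulting one (offset 100) is
 * tagged with SetPageReadahead() so later hits on it can be credited
 * back to swapin_readahead_hits via lookup_swap_cache().
 */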