linux/mm/swap_state.c
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
        }
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}
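
/*
 * Editor's note: swap_address_space(), used throughout this file, is not
 * defined here; in this kernel generation it lives in include/linux/swap.h
 * and simply indexes swapper_spaces[] by the entry's swap type, which is
 * why total_swapcache_pages() above has to sum over every element of the
 * array.  The stand-alone user-space sketch below models that mapping; the
 * model_* names and the bit layout are invented for illustration only.
 */
#include <stdio.h>

#define MODEL_MAX_SWAPFILES     32
#define MODEL_SWP_TYPE_SHIFT    26      /* not the real swp_entry_t layout */

struct model_space { unsigned long nrpages; };
static struct model_space model_spaces[MODEL_MAX_SWAPFILES];

static unsigned int model_swp_type(unsigned long entry)
{
        return entry >> MODEL_SWP_TYPE_SHIFT;
}

/* Analogue of swap_address_space(entry): one address space per swap type. */
static struct model_space *model_swap_address_space(unsigned long entry)
{
        return &model_spaces[model_swp_type(entry)];
}

int main(void)
{
        unsigned long entry = (2UL << MODEL_SWP_TYPE_SHIFT) | 1234;

        printf("entry (type 2, offset 1234) maps to swapper space %ld\n",
               (long)(model_swap_address_space(entry) - model_spaces));
        return 0;
}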

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                        entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(), so add_to_swap_cache()
                 * never returns -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
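
/*
 * Editor's note: add_to_swap_cache() calls radix_tree_maybe_preload() with
 * the caller's gfp_mask while sleeping is still allowed, so that the
 * radix_tree_insert() in __add_to_swap_cache() -- which runs under the
 * IRQ-disabled tree_lock -- can take its nodes from the per-CPU preload
 * pool instead of relying on atomic allocations.  The stand-alone
 * user-space sketch below illustrates that "allocate before locking,
 * consume under the lock" pattern; every name in it is invented for
 * illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_node { int key; struct sketch_node *next; };

static pthread_spinlock_t sketch_lock;
static struct sketch_node *sketch_head;

/* Analogue of radix_tree_maybe_preload(): sleeping allocation, no lock held. */
static struct sketch_node *sketch_preload(int key)
{
        struct sketch_node *n = malloc(sizeof(*n));

        if (n)
                n->key = key;
        return n;
}

/* Analogue of __add_to_swap_cache(): only pointer updates under the lock. */
static void sketch_insert_locked(struct sketch_node *n)
{
        pthread_spin_lock(&sketch_lock);
        n->next = sketch_head;
        sketch_head = n;
        pthread_spin_unlock(&sketch_lock);
}

int main(void)
{
        struct sketch_node *n;

        pthread_spin_init(&sketch_lock, PTHREAD_PROCESS_PRIVATE);
        n = sketch_preload(42);
        if (!n)
                return 1;               /* the -ENOMEM path */
        sketch_insert_locked(n);
        printf("inserted key %d\n", sketch_head->key);
        return 0;
}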

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list passed on to split_huge_page_to_list() if @page turns out to
 *        be a transparent huge page and has to be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}
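
/*
 * Editor's note: the -EEXIST branch above handles a transient state in
 * which another context owns SWAP_HAS_CACHE for this entry but has not yet
 * added the page to the swap cache, so the only sane response is to yield
 * and retry.  The stand-alone user-space sketch below (pthreads, invented
 * names) models that shape: one thread has claimed the slot and finishes
 * its insertion later, while the other backs off with sched_yield() -- the
 * cond_resched() analogue -- instead of spinning or bailing out.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static bool sketch_slot_claimed = true;  /* SWAP_HAS_CACHE already set */
static bool sketch_page_inserted;        /* page visible in the cache */

static void *sketch_slow_owner(void *unused)
{
        (void)unused;
        sched_yield();                   /* e.g. waiting on discard I/O */
        pthread_mutex_lock(&sketch_lock);
        sketch_page_inserted = true;
        sketch_slot_claimed = false;
        pthread_mutex_unlock(&sketch_lock);
        return NULL;
}

int main(void)
{
        pthread_t owner;

        pthread_create(&owner, NULL, sketch_slow_owner, NULL);
        for (;;) {
                pthread_mutex_lock(&sketch_lock);
                if (sketch_page_inserted) {     /* the found_page path */
                        pthread_mutex_unlock(&sketch_lock);
                        break;
                }
                if (sketch_slot_claimed) {      /* "-EEXIST" from the owner */
                        pthread_mutex_unlock(&sketch_lock);
                        sched_yield();          /* cond_resched() analogue */
                        continue;
                }
                pthread_mutex_unlock(&sketch_lock);
        }
        pthread_join(owner, NULL);
        puts("entry became visible after a transient -EEXIST");
        return 0;
}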

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
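
/*
 * Editor's note: stand-alone user-space model of the swapin_nr_pages()
 * heuristic above, for readers who want to see the window sizes it
 * actually produces.  The model_* names are invented, page_cluster is
 * assumed to be 3 (a common default, so a maximum window of 8 pages),
 * the two statics stand in for prev_offset and last_readahead_pages, and
 * "hits" is the value atomic_xchg() drains from swapin_readahead_hits.
 */
#include <stdio.h>

static unsigned long model_prev_offset;
static unsigned int model_last_readahead_pages;

static unsigned int model_swapin_nr_pages(unsigned long offset,
                                          unsigned int hits,
                                          unsigned int page_cluster)
{
        unsigned int pages, max_pages = 1u << page_cluster;

        if (max_pages <= 1)
                return 1;

        pages = hits + 2;
        if (pages == 2) {
                /* No hits: only read ahead when faulting next to the last offset. */
                if (offset != model_prev_offset + 1 &&
                    offset != model_prev_offset - 1)
                        pages = 1;
                model_prev_offset = offset;
        } else {
                /* Round up to a power of two, starting from 4. */
                unsigned int roundup = 4;

                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Never shrink the window by more than half per fault. */
        if (pages < model_last_readahead_pages / 2)
                pages = model_last_readahead_pages / 2;
        model_last_readahead_pages = pages;

        return pages;
}

int main(void)
{
        printf("cold, random fault      -> %u page(s)\n",
               model_swapin_nr_pages(1000, 0, 3));
        printf("adjacent fault, no hits -> %u page(s)\n",
               model_swapin_nr_pages(1001, 0, 3));
        printf("3 readahead hits        -> %u page(s)\n",
               model_swapin_nr_pages(2000, 3, 3));
        return 0;
}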

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                page_cache_release(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
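
/*
 * Editor's note: stand-alone user-space sketch of the window arithmetic
 * used by swapin_readahead() above.  The faulting offset is widened to an
 * aligned, power-of-two-sized cluster (mask = nr_pages - 1), and offset 0
 * is skipped because the first page of a swap area holds the swap header.
 * The offset and window size below are arbitrary example values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long offset = 1029;    /* faulting swap offset (example) */
        unsigned long nr = 8;           /* as returned by swapin_nr_pages() */
        unsigned long mask = nr - 1;
        unsigned long start_offset = offset & ~mask;
        unsigned long end_offset = offset | mask;
        unsigned long o;

        if (!start_offset)              /* first page is the swap header */
                start_offset++;

        for (o = start_offset; o <= end_offset; o++)
                printf("read offset %lu%s\n", o,
                       o == offset ? "   <- the faulting page" : "");
        return 0;
}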