/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

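/*
 * The swap cache: pages that are (or are about to be) backed by swap are
 * kept in swapper_space, indexed by swap entry, so that the swap-in and
 * swap-out paths can find them without touching the disk.
 */
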
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};

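/*
 * Swap cache statistics, dumped by show_swap_cache_info().  The counters
 * are bumped with plain non-atomic increments (find_total/find_success
 * outside any lock), so the values are best-effort rather than exact.
 */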
#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

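/* Print swap cache and overall swap usage statistics to the kernel log. */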
void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages);
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        spin_lock_irq(&swapper_space.tree_lock);
        error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
        if (likely(!error)) {
                total_swapcache_pages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&swapper_space.tree_lock);

        if (unlikely(error)) {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(), so add_to_swap_cache()
                 * never returns -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}

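/*
 * add_to_swap_cache - wrapper around __add_to_swap_cache() that first
 * preloads radix-tree nodes with @gfp_mask, so the insertion done under
 * tree_lock does not have to allocate memory itself.
 */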
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

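/*
 * The caller must hold swapper_space.tree_lock; delete_from_swap_cache()
 * below provides the locked wrapper.
 */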
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page(page))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
                return 0;
        }
}

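/*
 * Typical use, sketched from the reclaim path (shrink_page_list() in
 * mm/vmscan.c is the real caller and does far more bookkeeping):
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;
 *		...
 *	}
 *
 * On success the page is dirty and in the swap cache, so it can later be
 * written out via pageout() -> swap_writepage().  The activate_locked
 * label stands in for whatever failure handling the caller needs.
 */
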
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swapcache_free(entry, page);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

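/*
 * Note: a typical caller of free_pages_and_swap_cache() above is the
 * mmu_gather page-table teardown path (tlb_flush_mmu()), which releases
 * the pages it batched while unmapping in PAGEVEC_SIZE-sized chunks.
 */
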
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {   /* seems racy */
                        radix_tree_preload_end();
                        continue;
                }
                if (err) {              /* swp entry is obsolete? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

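/*
 * Note on read_swap_cache_async() above: swapcache_prepare() sets
 * SWAP_HAS_CACHE on the entry, so at most one task ever adds a given swap
 * entry to the cache.  A racing task sees -EEXIST and retries the lookup;
 * any other error means the entry has been freed, so we give up.
 */
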
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long offset = swp_offset(entry);
        unsigned long start_offset, end_offset;
        unsigned long mask = (1UL << page_cluster) - 1;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}

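/*
 * Usage sketch: the swap-in fault path pairs the cache lookup with the
 * readahead above, roughly (see do_swap_page() in mm/memory.c):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *	if (!page) {
 *		... the entry may have been freed by a racing task ...
 *	}
 */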