// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))
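
/*
 * Worked example of the encoding above (illustrative, assuming 4KB
 * pages, i.e. PAGE_SHIFT == 12 and thus SWAP_RA_WIN_SHIFT == 6):
 * hits occupy bits 0-5, the window bits 6-11, and the page-aligned
 * address the remaining high bits, e.g.
 *
 *      SWAP_RA_VAL(0x7f1234567000, 8, 3)
 *              == 0x7f1234567000 | (8 << 6) | 3 == 0x7f1234567203
 *
 * from which SWAP_RA_ADDR(), SWAP_RA_WIN() and SWAP_RA_HITS() recover
 * the address, window and hit count from one atomic_long_t.
 */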

/* The initial readahead hit count is 4, to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)   do { swap_cache_info.x += (nr); } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;
        struct swap_info_struct *si;

        for (i = 0; i < MAX_SWAPFILES; i++) {
                swp_entry_t entry = swp_entry(i, 1);

                /* Avoid get_swap_device() warning for a bad swap entry */
                if (!swp_swap_info(entry))
                        continue;
                /* Prevent swapoff from freeing swapper_spaces */
                si = get_swap_device(entry);
                if (!si)
                        continue;
                nr = nr_swapper_spaces[i];
                spaces = swapper_spaces[i];
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
                put_swap_device(si);
        }
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
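
/*
 * For illustration only, the output is of this shape (all numbers
 * made up):
 *
 *      16384 pages in swap cache
 *      Swap cache stats: add 123456, delete 107072, find 98765/204800
 *      Free swap  = 4030876kB
 *      Total swap = 4194300kB
 */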

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = 1UL << compound_order(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page + i);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}
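
/*
 * Note what the loop above means for a THP: an order-9 compound page
 * (512 subpages, as on x86-64) takes 512 consecutive xarray slots,
 * with page + i stored at index idx + i and page_private(page + i)
 * set to entry.val + i, so every subpage maps to its own swap slot.
 */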

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = hpage_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, NULL);
                VM_BUG_ON_PAGE(entry != page + i, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is an MADV_FREE page. The page's pte could
         * have the dirty bit cleared while its SwapBacked bit is still set,
         * because clearing the dirty bit and the SwapBacked bit is not
         * protected by a lock. For such a page, unmap will not set the dirty
         * bit, so page reclaim will not write the page out. This can cause
         * data corruption when the page is swapped in later. Always setting
         * the dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found page is returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;
        struct swap_info_struct *si;

        si = get_swap_device(entry);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        put_swap_device(si);

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page = NULL, *new_page = NULL;
        struct swap_info_struct *si;
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        break;
                found_page = find_get_page(swap_address_space(entry),
                                           swp_offset(entry));
                put_swap_device(si);
                if (found_page)
                        break;

                /*
                 * Just skip readahead for an unused swap slot.
                 * During swapoff, when swap_slot_cache is disabled,
                 * we have to handle the race between putting a
                 * swap entry into the swap cache and marking the swap
                 * slot as SWAP_HAS_CACHE.  That's handled in a later part
                 * of this code, or else swapoff will be aborted if we
                 * return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet.
                         */
                        cond_resched();
                        continue;
                } else if (err)         /* swp entry is obsolete? */
                        break;

                /* May fail (-ENOMEM) if XArray node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (likely(!err)) {
                        /* Initiate read into locked page */
                        SetPageWorkingset(new_page);
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                put_swap_page(new_page, entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
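
/*
 * A few illustrative data points for the heuristic above: with no
 * recent hits (hits == 0, pages == 2), an offset adjacent to
 * prev_offset keeps pages == 2 while a random offset drops it to 1;
 * hits == 3 gives pages == 5, rounded up to 8; hits == 7 gives 9,
 * rounded up to 16; the result is then clamped to max_pages and not
 * allowed to fall below half of the previous window.
 */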

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                prev_offset = offset;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_sem if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Test swap type to make sure the dereference is safe */
        if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
                struct inode *inode = si->swap_file->f_mapping->host;
                if (inode_read_congested(inode))
                        goto skip;
        }

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
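
/*
 * Example of the cluster arithmetic above: if swapin_nr_pages()
 * returns 8 for a fault at swap offset 21, mask == 7, so the plugged
 * loop reads offsets 16-23 (start_offset = 21 & ~7, end_offset =
 * 21 | 7), and the faulting entry itself is then returned through
 * read_swap_cache_async(), by which point it is normally already in
 * the swap cache.
 */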

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}
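
/*
 * Sizing example (assuming SWAP_ADDRESS_SPACE_PAGES == 2^14): a swap
 * device of 1048576 pages (4GB with 4KB pages) gets nr == 64 address
 * spaces, each covering a 64MB stripe, which spreads i_pages lock
 * contention across the stripes.
 */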

void exit_swap_address_space(unsigned int type)
{
        kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
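
/*
 * Example: a requested window of [fpfn - 4, fpfn + 4) is clamped so
 * that it neither leaves the VMA nor crosses out of faddr's PMD
 * (e.g. the 2MB region on x86-64), since only the page table for
 * that PMD is mapped when the PTEs are copied below.
 */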

static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if (unlikely(non_swap_entry(entry))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}
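
/*
 * Window placement above, by detected pattern: if the previous fault
 * was at pfn and the current one at fpfn == pfn + 1 (forward scan),
 * the window [fpfn, fpfn + win) extends forward; if pfn == fpfn + 1
 * (backward scan), it extends backward; otherwise it is centred on
 * fpfn, e.g. win == 8 gives [fpfn - 3, fpfn + 5) before clamping.
 */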

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_sem if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {0,};

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either with cluster-based (i.e. physical
 * disk based) or VMA-based (i.e. virtual address based on the faulting
 * address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);
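
/*
 * This attribute appears as /sys/kernel/mm/swap/vma_ra_enabled; for
 * example, to disable VMA based readahead and fall back to cluster
 * readahead:
 *
 *      # echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */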

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif