   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/mm.h>
  10#include <linux/sched/mm.h>
  11#include <linux/sched/task.h>
  12#include <linux/hugetlb.h>
  13#include <linux/mman.h>
  14#include <linux/slab.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/swap.h>
  17#include <linux/vmalloc.h>
  18#include <linux/pagemap.h>
  19#include <linux/namei.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/blkdev.h>
  22#include <linux/random.h>
  23#include <linux/writeback.h>
  24#include <linux/proc_fs.h>
  25#include <linux/seq_file.h>
  26#include <linux/init.h>
  27#include <linux/ksm.h>
  28#include <linux/rmap.h>
  29#include <linux/security.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mutex.h>
  32#include <linux/capability.h>
  33#include <linux/syscalls.h>
  34#include <linux/memcontrol.h>
  35#include <linux/poll.h>
  36#include <linux/oom.h>
  37#include <linux/frontswap.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/swap_slots.h>
  41#include <linux/sort.h>
  42
  43#include <asm/pgtable.h>
  44#include <asm/tlbflush.h>
  45#include <linux/swapops.h>
  46#include <linux/swap_cgroup.h>
  47
  48static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  49                                 unsigned char);
  50static void free_swap_count_continuations(struct swap_info_struct *);
  51static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  52
  53DEFINE_SPINLOCK(swap_lock);
  54static unsigned int nr_swapfiles;
  55atomic_long_t nr_swap_pages;
  56/*
  57 * Some modules use swappable objects and may try to swap them out under
  58 * memory pressure (via the shrinker). Before doing so, they may wish to
  59 * check to see if any swap space is available.
  60 */
  61EXPORT_SYMBOL_GPL(nr_swap_pages);
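
/*
 * Illustrative sketch only (not part of this file): a module shrinker that
 * keeps swappable objects might bail out of its scan when no swap space is
 * left, using get_nr_swap_pages() from <linux/swap.h> to read nr_swap_pages.
 * The shrinker callback and example_swap_out_objects() are hypothetical.
 *
 *	static unsigned long example_shrink_scan(struct shrinker *sh,
 *						 struct shrink_control *sc)
 *	{
 *		if (get_nr_swap_pages() <= 0)	// nowhere left to swap to
 *			return SHRINK_STOP;
 *		return example_swap_out_objects(sc->nr_to_scan);
 *	}
 */
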
   62/* Protected with swap_lock. Reading in vm_swap_full() doesn't need the lock. */
  63long total_swap_pages;
  64static int least_priority = -1;
  65
  66static const char Bad_file[] = "Bad swap file entry ";
  67static const char Unused_file[] = "Unused swap file entry ";
  68static const char Bad_offset[] = "Bad swap offset entry ";
  69static const char Unused_offset[] = "Unused swap offset entry ";
  70
  71/*
  72 * all active swap_info_structs
  73 * protected with swap_lock, and ordered by priority.
  74 */
  75PLIST_HEAD(swap_active_head);
  76
  77/*
  78 * all available (active, not full) swap_info_structs
  79 * protected with swap_avail_lock, ordered by priority.
  80 * This is used by get_swap_page() instead of swap_active_head
  81 * because swap_active_head includes all swap_info_structs,
  82 * but get_swap_page() doesn't need to look at full ones.
  83 * This uses its own lock instead of swap_lock because when a
  84 * swap_info_struct changes between not-full/full, it needs to
  85 * add/remove itself to/from this list, but the swap_info_struct->lock
  86 * is held and the locking order requires swap_lock to be taken
  87 * before any swap_info_struct->lock.
  88 */
  89static struct plist_head *swap_avail_heads;
  90static DEFINE_SPINLOCK(swap_avail_lock);
  91
  92struct swap_info_struct *swap_info[MAX_SWAPFILES];
  93
  94static DEFINE_MUTEX(swapon_mutex);
  95
  96static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  97/* Activity counter to indicate that a swapon or swapoff has occurred */
  98static atomic_t proc_poll_event = ATOMIC_INIT(0);
  99
 100atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 101
 102static struct swap_info_struct *swap_type_to_swap_info(int type)
 103{
 104        if (type >= READ_ONCE(nr_swapfiles))
 105                return NULL;
 106
 107        smp_rmb();      /* Pairs with smp_wmb in alloc_swap_info. */
 108        return READ_ONCE(swap_info[type]);
 109}
 110
 111static inline unsigned char swap_count(unsigned char ent)
 112{
 113        return ent & ~SWAP_HAS_CACHE;   /* may include COUNT_CONTINUED flag */
 114}
 115
 116/* Reclaim the swap entry anyway if possible */
 117#define TTRS_ANYWAY             0x1
 118/*
 119 * Reclaim the swap entry if there are no more mappings of the
 120 * corresponding page
 121 */
 122#define TTRS_UNMAPPED           0x2
  123/* Reclaim the swap entry if swap is getting full */
 124#define TTRS_FULL               0x4
 125
 126/* returns 1 if swap entry is freed */
 127static int __try_to_reclaim_swap(struct swap_info_struct *si,
 128                                 unsigned long offset, unsigned long flags)
 129{
 130        swp_entry_t entry = swp_entry(si->type, offset);
 131        struct page *page;
 132        int ret = 0;
 133
 134        page = find_get_page(swap_address_space(entry), offset);
 135        if (!page)
 136                return 0;
  137        /*
  138         * This function can be called from scan_swap_map_slots() while it is
  139         * running on behalf of vmscan.c reclaiming pages, so a page lock may
  140         * already be held here.  We have to use trylock to avoid deadlock.
  141         * This is a special case; in usual operations, use try_to_free_swap()
  142         * with an explicit lock_page().
  143         */
 144        if (trylock_page(page)) {
 145                if ((flags & TTRS_ANYWAY) ||
 146                    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
 147                    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
 148                        ret = try_to_free_swap(page);
 149                unlock_page(page);
 150        }
 151        put_page(page);
 152        return ret;
 153}
 154
 155static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 156{
 157        struct rb_node *rb = rb_first(&sis->swap_extent_root);
 158        return rb_entry(rb, struct swap_extent, rb_node);
 159}
 160
 161static inline struct swap_extent *next_se(struct swap_extent *se)
 162{
 163        struct rb_node *rb = rb_next(&se->rb_node);
 164        return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 165}
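
/*
 * Together, first_se() and next_se() give an in-order walk of a device's
 * extent tree.  A minimal usage sketch (mirroring discard_swap() below),
 * where process_extent() stands in for whatever per-extent work is needed:
 *
 *	struct swap_extent *se;
 *
 *	for (se = first_se(sis); se; se = next_se(se))
 *		process_extent(se->start_block, se->nr_pages);
 */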
 166
 167/*
  168 * swapon tells the device that all the old swap contents can be discarded,
 169 * to allow the swap device to optimize its wear-levelling.
 170 */
 171static int discard_swap(struct swap_info_struct *si)
 172{
 173        struct swap_extent *se;
 174        sector_t start_block;
 175        sector_t nr_blocks;
 176        int err = 0;
 177
 178        /* Do not discard the swap header page! */
 179        se = first_se(si);
 180        start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 181        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 182        if (nr_blocks) {
 183                err = blkdev_issue_discard(si->bdev, start_block,
 184                                nr_blocks, GFP_KERNEL, 0);
 185                if (err)
 186                        return err;
 187                cond_resched();
 188        }
 189
 190        for (se = next_se(se); se; se = next_se(se)) {
 191                start_block = se->start_block << (PAGE_SHIFT - 9);
 192                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 193
 194                err = blkdev_issue_discard(si->bdev, start_block,
 195                                nr_blocks, GFP_KERNEL, 0);
 196                if (err)
 197                        break;
 198
 199                cond_resched();
 200        }
 201        return err;             /* That will often be -EOPNOTSUPP */
 202}
 203
 204static struct swap_extent *
 205offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 206{
 207        struct swap_extent *se;
 208        struct rb_node *rb;
 209
 210        rb = sis->swap_extent_root.rb_node;
 211        while (rb) {
 212                se = rb_entry(rb, struct swap_extent, rb_node);
 213                if (offset < se->start_page)
 214                        rb = rb->rb_left;
 215                else if (offset >= se->start_page + se->nr_pages)
 216                        rb = rb->rb_right;
 217                else
 218                        return se;
 219        }
 220        /* It *must* be present */
 221        BUG();
 222}
 223
 224/*
  225 * swap allocation tells the device that a cluster of swap can now be discarded,
 226 * to allow the swap device to optimize its wear-levelling.
 227 */
 228static void discard_swap_cluster(struct swap_info_struct *si,
 229                                 pgoff_t start_page, pgoff_t nr_pages)
 230{
 231        struct swap_extent *se = offset_to_swap_extent(si, start_page);
 232
 233        while (nr_pages) {
 234                pgoff_t offset = start_page - se->start_page;
 235                sector_t start_block = se->start_block + offset;
 236                sector_t nr_blocks = se->nr_pages - offset;
 237
 238                if (nr_blocks > nr_pages)
 239                        nr_blocks = nr_pages;
 240                start_page += nr_blocks;
 241                nr_pages -= nr_blocks;
 242
 243                start_block <<= PAGE_SHIFT - 9;
 244                nr_blocks <<= PAGE_SHIFT - 9;
 245                if (blkdev_issue_discard(si->bdev, start_block,
 246                                        nr_blocks, GFP_NOIO, 0))
 247                        break;
 248
 249                se = next_se(se);
 250        }
 251}
 252
 253#ifdef CONFIG_THP_SWAP
 254#define SWAPFILE_CLUSTER        HPAGE_PMD_NR
 255
 256#define swap_entry_size(size)   (size)
 257#else
 258#define SWAPFILE_CLUSTER        256
 259
 260/*
  261 * Define swap_entry_size() as a constant to let the compiler optimize
  262 * out some code if !CONFIG_THP_SWAP
 263 */
 264#define swap_entry_size(size)   1
 265#endif
 266#define LATENCY_LIMIT           256
 267
 268static inline void cluster_set_flag(struct swap_cluster_info *info,
 269        unsigned int flag)
 270{
 271        info->flags = flag;
 272}
 273
 274static inline unsigned int cluster_count(struct swap_cluster_info *info)
 275{
 276        return info->data;
 277}
 278
 279static inline void cluster_set_count(struct swap_cluster_info *info,
 280                                     unsigned int c)
 281{
 282        info->data = c;
 283}
 284
 285static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 286                                         unsigned int c, unsigned int f)
 287{
 288        info->flags = f;
 289        info->data = c;
 290}
 291
 292static inline unsigned int cluster_next(struct swap_cluster_info *info)
 293{
 294        return info->data;
 295}
 296
 297static inline void cluster_set_next(struct swap_cluster_info *info,
 298                                    unsigned int n)
 299{
 300        info->data = n;
 301}
 302
 303static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 304                                         unsigned int n, unsigned int f)
 305{
 306        info->flags = f;
 307        info->data = n;
 308}
 309
 310static inline bool cluster_is_free(struct swap_cluster_info *info)
 311{
 312        return info->flags & CLUSTER_FLAG_FREE;
 313}
 314
 315static inline bool cluster_is_null(struct swap_cluster_info *info)
 316{
 317        return info->flags & CLUSTER_FLAG_NEXT_NULL;
 318}
 319
 320static inline void cluster_set_null(struct swap_cluster_info *info)
 321{
 322        info->flags = CLUSTER_FLAG_NEXT_NULL;
 323        info->data = 0;
 324}
 325
 326static inline bool cluster_is_huge(struct swap_cluster_info *info)
 327{
 328        if (IS_ENABLED(CONFIG_THP_SWAP))
 329                return info->flags & CLUSTER_FLAG_HUGE;
 330        return false;
 331}
 332
 333static inline void cluster_clear_huge(struct swap_cluster_info *info)
 334{
 335        info->flags &= ~CLUSTER_FLAG_HUGE;
 336}
 337
 338static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 339                                                     unsigned long offset)
 340{
 341        struct swap_cluster_info *ci;
 342
 343        ci = si->cluster_info;
 344        if (ci) {
 345                ci += offset / SWAPFILE_CLUSTER;
 346                spin_lock(&ci->lock);
 347        }
 348        return ci;
 349}
 350
 351static inline void unlock_cluster(struct swap_cluster_info *ci)
 352{
 353        if (ci)
 354                spin_unlock(&ci->lock);
 355}
 356
 357/*
 358 * Determine the locking method in use for this device.  Return
 359 * swap_cluster_info if SSD-style cluster-based locking is in place.
 360 */
 361static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 362                struct swap_info_struct *si, unsigned long offset)
 363{
 364        struct swap_cluster_info *ci;
 365
 366        /* Try to use fine-grained SSD-style locking if available: */
 367        ci = lock_cluster(si, offset);
 368        /* Otherwise, fall back to traditional, coarse locking: */
 369        if (!ci)
 370                spin_lock(&si->lock);
 371
 372        return ci;
 373}
 374
 375static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 376                                               struct swap_cluster_info *ci)
 377{
 378        if (ci)
 379                unlock_cluster(ci);
 380        else
 381                spin_unlock(&si->lock);
 382}
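
/*
 * Typical usage sketch for the helpers above (compare swap_swapcount()
 * further down): take whichever lock protects @offset, peek at the swap
 * map, then release the same lock.  The locals here are illustrative only.
 *
 *	struct swap_cluster_info *ci;
 *	unsigned char count;
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	count = swap_count(si->swap_map[offset]);
 *	unlock_cluster_or_swap_info(si, ci);
 */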
 383
 384static inline bool cluster_list_empty(struct swap_cluster_list *list)
 385{
 386        return cluster_is_null(&list->head);
 387}
 388
 389static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 390{
 391        return cluster_next(&list->head);
 392}
 393
 394static void cluster_list_init(struct swap_cluster_list *list)
 395{
 396        cluster_set_null(&list->head);
 397        cluster_set_null(&list->tail);
 398}
 399
 400static void cluster_list_add_tail(struct swap_cluster_list *list,
 401                                  struct swap_cluster_info *ci,
 402                                  unsigned int idx)
 403{
 404        if (cluster_list_empty(list)) {
 405                cluster_set_next_flag(&list->head, idx, 0);
 406                cluster_set_next_flag(&list->tail, idx, 0);
 407        } else {
 408                struct swap_cluster_info *ci_tail;
 409                unsigned int tail = cluster_next(&list->tail);
 410
 411                /*
 412                 * Nested cluster lock, but both cluster locks are
  413                 * only acquired while holding swap_info_struct->lock
 414                 */
 415                ci_tail = ci + tail;
 416                spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 417                cluster_set_next(ci_tail, idx);
 418                spin_unlock(&ci_tail->lock);
 419                cluster_set_next_flag(&list->tail, idx, 0);
 420        }
 421}
 422
 423static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 424                                           struct swap_cluster_info *ci)
 425{
 426        unsigned int idx;
 427
 428        idx = cluster_next(&list->head);
 429        if (cluster_next(&list->tail) == idx) {
 430                cluster_set_null(&list->head);
 431                cluster_set_null(&list->tail);
 432        } else
 433                cluster_set_next_flag(&list->head,
 434                                      cluster_next(&ci[idx]), 0);
 435
 436        return idx;
 437}
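
/*
 * These two helpers make a swap_cluster_list behave as a FIFO of cluster
 * indexes: producers append with cluster_list_add_tail() and consumers pop
 * with cluster_list_del_first(), as free_clusters/discard_clusters do below.
 * A rough sketch of the pattern (assuming si->lock is already held):
 *
 *	if (!cluster_list_empty(&si->free_clusters)) {
 *		unsigned int idx;
 *
 *		idx = cluster_list_del_first(&si->free_clusters,
 *					     si->cluster_info);
 *		// ... use cluster idx ...
 *	}
 */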
 438
  439/* Add a cluster to the discard list and schedule the discard work */
 440static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 441                unsigned int idx)
 442{
 443        /*
 444         * If scan_swap_map() can't find a free cluster, it will check
  445         * si->swap_map directly. To make sure the cluster being discarded isn't
  446         * taken by scan_swap_map(), mark the swap entries bad (occupied). They
  447         * will be cleared after the discard completes.
 448         */
 449        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 450                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 451
 452        cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 453
 454        schedule_work(&si->discard_work);
 455}
 456
 457static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 458{
 459        struct swap_cluster_info *ci = si->cluster_info;
 460
 461        cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 462        cluster_list_add_tail(&si->free_clusters, ci, idx);
 463}
 464
 465/*
  466 * Do the scheduled discards. After a cluster discard is finished, the cluster
  467 * will be added to the free cluster list. The caller should hold si->lock.
  468 */
 469static void swap_do_scheduled_discard(struct swap_info_struct *si)
 470{
 471        struct swap_cluster_info *info, *ci;
 472        unsigned int idx;
 473
 474        info = si->cluster_info;
 475
 476        while (!cluster_list_empty(&si->discard_clusters)) {
 477                idx = cluster_list_del_first(&si->discard_clusters, info);
 478                spin_unlock(&si->lock);
 479
 480                discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 481                                SWAPFILE_CLUSTER);
 482
 483                spin_lock(&si->lock);
 484                ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 485                __free_cluster(si, idx);
 486                memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 487                                0, SWAPFILE_CLUSTER);
 488                unlock_cluster(ci);
 489        }
 490}
 491
 492static void swap_discard_work(struct work_struct *work)
 493{
 494        struct swap_info_struct *si;
 495
 496        si = container_of(work, struct swap_info_struct, discard_work);
 497
 498        spin_lock(&si->lock);
 499        swap_do_scheduled_discard(si);
 500        spin_unlock(&si->lock);
 501}
 502
 503static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 504{
 505        struct swap_cluster_info *ci = si->cluster_info;
 506
 507        VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 508        cluster_list_del_first(&si->free_clusters, ci);
 509        cluster_set_count_flag(ci + idx, 0, 0);
 510}
 511
 512static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 513{
 514        struct swap_cluster_info *ci = si->cluster_info + idx;
 515
 516        VM_BUG_ON(cluster_count(ci) != 0);
 517        /*
  518         * If the swap device supports discard, schedule a discard of the
  519         * cluster instead of freeing it immediately. The cluster will be
  520         * freed after the discard completes.
 521         */
 522        if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 523            (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 524                swap_cluster_schedule_discard(si, idx);
 525                return;
 526        }
 527
 528        __free_cluster(si, idx);
 529}
 530
 531/*
 532 * The cluster corresponding to page_nr will be used. The cluster will be
  533 * removed from the free cluster list and its usage counter will be increased.
 534 */
 535static void inc_cluster_info_page(struct swap_info_struct *p,
 536        struct swap_cluster_info *cluster_info, unsigned long page_nr)
 537{
 538        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 539
 540        if (!cluster_info)
 541                return;
 542        if (cluster_is_free(&cluster_info[idx]))
 543                alloc_cluster(p, idx);
 544
 545        VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 546        cluster_set_count(&cluster_info[idx],
 547                cluster_count(&cluster_info[idx]) + 1);
 548}
 549
 550/*
  551 * Decrease by one the usage count of the cluster corresponding to page_nr.
  552 * If the usage counter becomes 0, meaning no page in the cluster is in use,
  553 * we can optionally discard the cluster and add it to the free cluster list.
 554 */
 555static void dec_cluster_info_page(struct swap_info_struct *p,
 556        struct swap_cluster_info *cluster_info, unsigned long page_nr)
 557{
 558        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 559
 560        if (!cluster_info)
 561                return;
 562
 563        VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 564        cluster_set_count(&cluster_info[idx],
 565                cluster_count(&cluster_info[idx]) - 1);
 566
 567        if (cluster_count(&cluster_info[idx]) == 0)
 568                free_cluster(p, idx);
 569}
 570
 571/*
  572 * It's possible for scan_swap_map() to use a free cluster from the middle of
  573 * the free cluster list. Avoid such abuse to prevent list corruption.
 574 */
 575static bool
 576scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 577        unsigned long offset)
 578{
 579        struct percpu_cluster *percpu_cluster;
 580        bool conflict;
 581
 582        offset /= SWAPFILE_CLUSTER;
 583        conflict = !cluster_list_empty(&si->free_clusters) &&
 584                offset != cluster_list_first(&si->free_clusters) &&
 585                cluster_is_free(&si->cluster_info[offset]);
 586
 587        if (!conflict)
 588                return false;
 589
 590        percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 591        cluster_set_null(&percpu_cluster->index);
 592        return true;
 593}
 594
 595/*
  596 * Try to get a swap entry from the current CPU's swap entry pool (a cluster).
  597 * This might involve allocating a new cluster for the current CPU too.
 598 */
 599static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 600        unsigned long *offset, unsigned long *scan_base)
 601{
 602        struct percpu_cluster *cluster;
 603        struct swap_cluster_info *ci;
 604        bool found_free;
 605        unsigned long tmp, max;
 606
 607new_cluster:
 608        cluster = this_cpu_ptr(si->percpu_cluster);
 609        if (cluster_is_null(&cluster->index)) {
 610                if (!cluster_list_empty(&si->free_clusters)) {
 611                        cluster->index = si->free_clusters.head;
 612                        cluster->next = cluster_next(&cluster->index) *
 613                                        SWAPFILE_CLUSTER;
 614                } else if (!cluster_list_empty(&si->discard_clusters)) {
 615                        /*
  616                         * We have no free cluster, but some clusters are being
  617                         * discarded; do the discard now and reclaim them.
 618                         */
 619                        swap_do_scheduled_discard(si);
 620                        *scan_base = *offset = si->cluster_next;
 621                        goto new_cluster;
 622                } else
 623                        return false;
 624        }
 625
 626        found_free = false;
 627
 628        /*
  629         * Other CPUs can use our cluster if they can't find a free one, so
  630         * check whether there is still a free entry in the cluster.
 631         */
 632        tmp = cluster->next;
 633        max = min_t(unsigned long, si->max,
 634                    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 635        if (tmp >= max) {
 636                cluster_set_null(&cluster->index);
 637                goto new_cluster;
 638        }
 639        ci = lock_cluster(si, tmp);
 640        while (tmp < max) {
 641                if (!si->swap_map[tmp]) {
 642                        found_free = true;
 643                        break;
 644                }
 645                tmp++;
 646        }
 647        unlock_cluster(ci);
 648        if (!found_free) {
 649                cluster_set_null(&cluster->index);
 650                goto new_cluster;
 651        }
 652        cluster->next = tmp + 1;
 653        *offset = tmp;
 654        *scan_base = tmp;
 655        return found_free;
 656}
 657
 658static void __del_from_avail_list(struct swap_info_struct *p)
 659{
 660        int nid;
 661
 662        for_each_node(nid)
 663                plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 664}
 665
 666static void del_from_avail_list(struct swap_info_struct *p)
 667{
 668        spin_lock(&swap_avail_lock);
 669        __del_from_avail_list(p);
 670        spin_unlock(&swap_avail_lock);
 671}
 672
 673static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 674                             unsigned int nr_entries)
 675{
 676        unsigned int end = offset + nr_entries - 1;
 677
 678        if (offset == si->lowest_bit)
 679                si->lowest_bit += nr_entries;
 680        if (end == si->highest_bit)
 681                si->highest_bit -= nr_entries;
 682        si->inuse_pages += nr_entries;
 683        if (si->inuse_pages == si->pages) {
 684                si->lowest_bit = si->max;
 685                si->highest_bit = 0;
 686                del_from_avail_list(si);
 687        }
 688}
 689
 690static void add_to_avail_list(struct swap_info_struct *p)
 691{
 692        int nid;
 693
 694        spin_lock(&swap_avail_lock);
 695        for_each_node(nid) {
 696                WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 697                plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 698        }
 699        spin_unlock(&swap_avail_lock);
 700}
 701
 702static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 703                            unsigned int nr_entries)
 704{
 705        unsigned long end = offset + nr_entries - 1;
 706        void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 707
 708        if (offset < si->lowest_bit)
 709                si->lowest_bit = offset;
 710        if (end > si->highest_bit) {
 711                bool was_full = !si->highest_bit;
 712
 713                si->highest_bit = end;
 714                if (was_full && (si->flags & SWP_WRITEOK))
 715                        add_to_avail_list(si);
 716        }
 717        atomic_long_add(nr_entries, &nr_swap_pages);
 718        si->inuse_pages -= nr_entries;
 719        if (si->flags & SWP_BLKDEV)
 720                swap_slot_free_notify =
 721                        si->bdev->bd_disk->fops->swap_slot_free_notify;
 722        else
 723                swap_slot_free_notify = NULL;
 724        while (offset <= end) {
 725                frontswap_invalidate_page(si->type, offset);
 726                if (swap_slot_free_notify)
 727                        swap_slot_free_notify(si->bdev, offset);
 728                offset++;
 729        }
 730}
 731
 732static int scan_swap_map_slots(struct swap_info_struct *si,
 733                               unsigned char usage, int nr,
 734                               swp_entry_t slots[])
 735{
 736        struct swap_cluster_info *ci;
 737        unsigned long offset;
 738        unsigned long scan_base;
 739        unsigned long last_in_cluster = 0;
 740        int latency_ration = LATENCY_LIMIT;
 741        int n_ret = 0;
 742
 743        if (nr > SWAP_BATCH)
 744                nr = SWAP_BATCH;
 745
 746        /*
 747         * We try to cluster swap pages by allocating them sequentially
 748         * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 749         * way, however, we resort to first-free allocation, starting
 750         * a new cluster.  This prevents us from scattering swap pages
 751         * all over the entire swap partition, so that we reduce
 752         * overall disk seek times between swap pages.  -- sct
 753         * But we do now try to find an empty cluster.  -Andrea
 754         * And we let swap pages go all over an SSD partition.  Hugh
 755         */
 756
 757        si->flags += SWP_SCANNING;
 758        scan_base = offset = si->cluster_next;
 759
 760        /* SSD algorithm */
 761        if (si->cluster_info) {
 762                if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 763                        goto checks;
 764                else
 765                        goto scan;
 766        }
 767
 768        if (unlikely(!si->cluster_nr--)) {
 769                if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 770                        si->cluster_nr = SWAPFILE_CLUSTER - 1;
 771                        goto checks;
 772                }
 773
 774                spin_unlock(&si->lock);
 775
 776                /*
 777                 * If seek is expensive, start searching for new cluster from
 778                 * start of partition, to minimize the span of allocated swap.
 779                 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 780                 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 781                 */
 782                scan_base = offset = si->lowest_bit;
 783                last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 784
 785                /* Locate the first empty (unaligned) cluster */
 786                for (; last_in_cluster <= si->highest_bit; offset++) {
 787                        if (si->swap_map[offset])
 788                                last_in_cluster = offset + SWAPFILE_CLUSTER;
 789                        else if (offset == last_in_cluster) {
 790                                spin_lock(&si->lock);
 791                                offset -= SWAPFILE_CLUSTER - 1;
 792                                si->cluster_next = offset;
 793                                si->cluster_nr = SWAPFILE_CLUSTER - 1;
 794                                goto checks;
 795                        }
 796                        if (unlikely(--latency_ration < 0)) {
 797                                cond_resched();
 798                                latency_ration = LATENCY_LIMIT;
 799                        }
 800                }
 801
 802                offset = scan_base;
 803                spin_lock(&si->lock);
 804                si->cluster_nr = SWAPFILE_CLUSTER - 1;
 805        }
 806
 807checks:
 808        if (si->cluster_info) {
 809                while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
  810                        /* take a break if we already got some slots */
 811                        if (n_ret)
 812                                goto done;
 813                        if (!scan_swap_map_try_ssd_cluster(si, &offset,
 814                                                        &scan_base))
 815                                goto scan;
 816                }
 817        }
 818        if (!(si->flags & SWP_WRITEOK))
 819                goto no_page;
 820        if (!si->highest_bit)
 821                goto no_page;
 822        if (offset > si->highest_bit)
 823                scan_base = offset = si->lowest_bit;
 824
 825        ci = lock_cluster(si, offset);
 826        /* reuse swap entry of cache-only swap if not busy. */
 827        if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 828                int swap_was_freed;
 829                unlock_cluster(ci);
 830                spin_unlock(&si->lock);
 831                swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 832                spin_lock(&si->lock);
 833                /* entry was freed successfully, try to use this again */
 834                if (swap_was_freed)
 835                        goto checks;
 836                goto scan; /* check next one */
 837        }
 838
 839        if (si->swap_map[offset]) {
 840                unlock_cluster(ci);
 841                if (!n_ret)
 842                        goto scan;
 843                else
 844                        goto done;
 845        }
 846        si->swap_map[offset] = usage;
 847        inc_cluster_info_page(si, si->cluster_info, offset);
 848        unlock_cluster(ci);
 849
 850        swap_range_alloc(si, offset, 1);
 851        si->cluster_next = offset + 1;
 852        slots[n_ret++] = swp_entry(si->type, offset);
 853
  854        /* got enough slots or reached max slots? */
 855        if ((n_ret == nr) || (offset >= si->highest_bit))
 856                goto done;
 857
 858        /* search for next available slot */
 859
 860        /* time to take a break? */
 861        if (unlikely(--latency_ration < 0)) {
 862                if (n_ret)
 863                        goto done;
 864                spin_unlock(&si->lock);
 865                cond_resched();
 866                spin_lock(&si->lock);
 867                latency_ration = LATENCY_LIMIT;
 868        }
 869
 870        /* try to get more slots in cluster */
 871        if (si->cluster_info) {
 872                if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 873                        goto checks;
 874                else
 875                        goto done;
 876        }
 877        /* non-ssd case */
 878        ++offset;
 879
 880        /* non-ssd case, still more slots in cluster? */
 881        if (si->cluster_nr && !si->swap_map[offset]) {
 882                --si->cluster_nr;
 883                goto checks;
 884        }
 885
 886done:
 887        si->flags -= SWP_SCANNING;
 888        return n_ret;
 889
 890scan:
 891        spin_unlock(&si->lock);
 892        while (++offset <= si->highest_bit) {
 893                if (!si->swap_map[offset]) {
 894                        spin_lock(&si->lock);
 895                        goto checks;
 896                }
 897                if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 898                        spin_lock(&si->lock);
 899                        goto checks;
 900                }
 901                if (unlikely(--latency_ration < 0)) {
 902                        cond_resched();
 903                        latency_ration = LATENCY_LIMIT;
 904                }
 905        }
 906        offset = si->lowest_bit;
 907        while (offset < scan_base) {
 908                if (!si->swap_map[offset]) {
 909                        spin_lock(&si->lock);
 910                        goto checks;
 911                }
 912                if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 913                        spin_lock(&si->lock);
 914                        goto checks;
 915                }
 916                if (unlikely(--latency_ration < 0)) {
 917                        cond_resched();
 918                        latency_ration = LATENCY_LIMIT;
 919                }
 920                offset++;
 921        }
 922        spin_lock(&si->lock);
 923
 924no_page:
 925        si->flags -= SWP_SCANNING;
 926        return n_ret;
 927}
 928
 929static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 930{
 931        unsigned long idx;
 932        struct swap_cluster_info *ci;
 933        unsigned long offset, i;
 934        unsigned char *map;
 935
 936        /*
 937         * Should not even be attempting cluster allocations when huge
 938         * page swap is disabled.  Warn and fail the allocation.
 939         */
 940        if (!IS_ENABLED(CONFIG_THP_SWAP)) {
 941                VM_WARN_ON_ONCE(1);
 942                return 0;
 943        }
 944
 945        if (cluster_list_empty(&si->free_clusters))
 946                return 0;
 947
 948        idx = cluster_list_first(&si->free_clusters);
 949        offset = idx * SWAPFILE_CLUSTER;
 950        ci = lock_cluster(si, offset);
 951        alloc_cluster(si, idx);
 952        cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
 953
 954        map = si->swap_map + offset;
 955        for (i = 0; i < SWAPFILE_CLUSTER; i++)
 956                map[i] = SWAP_HAS_CACHE;
 957        unlock_cluster(ci);
 958        swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
 959        *slot = swp_entry(si->type, offset);
 960
 961        return 1;
 962}
 963
 964static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 965{
 966        unsigned long offset = idx * SWAPFILE_CLUSTER;
 967        struct swap_cluster_info *ci;
 968
 969        ci = lock_cluster(si, offset);
 970        memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
 971        cluster_set_count_flag(ci, 0, 0);
 972        free_cluster(si, idx);
 973        unlock_cluster(ci);
 974        swap_range_free(si, offset, SWAPFILE_CLUSTER);
 975}
 976
 977static unsigned long scan_swap_map(struct swap_info_struct *si,
 978                                   unsigned char usage)
 979{
 980        swp_entry_t entry;
 981        int n_ret;
 982
 983        n_ret = scan_swap_map_slots(si, usage, 1, &entry);
 984
 985        if (n_ret)
 986                return swp_offset(entry);
 987        else
 988                return 0;
 989
 990}
 991
 992int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 993{
 994        unsigned long size = swap_entry_size(entry_size);
 995        struct swap_info_struct *si, *next;
 996        long avail_pgs;
 997        int n_ret = 0;
 998        int node;
 999
1000        /* Only single cluster request supported */
1001        WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1002
1003        avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1004        if (avail_pgs <= 0)
1005                goto noswap;
1006
1007        if (n_goal > SWAP_BATCH)
1008                n_goal = SWAP_BATCH;
1009
1010        if (n_goal > avail_pgs)
1011                n_goal = avail_pgs;
1012
1013        atomic_long_sub(n_goal * size, &nr_swap_pages);
1014
1015        spin_lock(&swap_avail_lock);
1016
1017start_over:
1018        node = numa_node_id();
1019        plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1020                /* requeue si to after same-priority siblings */
1021                plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1022                spin_unlock(&swap_avail_lock);
1023                spin_lock(&si->lock);
1024                if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1025                        spin_lock(&swap_avail_lock);
1026                        if (plist_node_empty(&si->avail_lists[node])) {
1027                                spin_unlock(&si->lock);
1028                                goto nextsi;
1029                        }
1030                        WARN(!si->highest_bit,
1031                             "swap_info %d in list but !highest_bit\n",
1032                             si->type);
1033                        WARN(!(si->flags & SWP_WRITEOK),
1034                             "swap_info %d in list but !SWP_WRITEOK\n",
1035                             si->type);
1036                        __del_from_avail_list(si);
1037                        spin_unlock(&si->lock);
1038                        goto nextsi;
1039                }
1040                if (size == SWAPFILE_CLUSTER) {
1041                        if (!(si->flags & SWP_FS))
1042                                n_ret = swap_alloc_cluster(si, swp_entries);
1043                } else
1044                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1045                                                    n_goal, swp_entries);
1046                spin_unlock(&si->lock);
1047                if (n_ret || size == SWAPFILE_CLUSTER)
1048                        goto check_out;
1049                pr_debug("scan_swap_map of si %d failed to find offset\n",
1050                        si->type);
1051
1052                spin_lock(&swap_avail_lock);
1053nextsi:
1054                /*
1055                 * if we got here, it's likely that si was almost full before,
1056                 * and since scan_swap_map() can drop the si->lock, multiple
1057                 * callers probably all tried to get a page from the same si
1058                 * and it filled up before we could get one; or, the si filled
1059                 * up between us dropping swap_avail_lock and taking si->lock.
1060                 * Since we dropped the swap_avail_lock, the swap_avail_head
1061                 * list may have been modified; so if next is still in the
1062                 * swap_avail_head list then try it, otherwise start over
1063                 * if we have not gotten any slots.
1064                 */
1065                if (plist_node_empty(&next->avail_lists[node]))
1066                        goto start_over;
1067        }
1068
1069        spin_unlock(&swap_avail_lock);
1070
1071check_out:
1072        if (n_ret < n_goal)
1073                atomic_long_add((long)(n_goal - n_ret) * size,
1074                                &nr_swap_pages);
1075noswap:
1076        return n_ret;
1077}
1078
 1079/* The only caller of this function is now the suspend routine */
1080swp_entry_t get_swap_page_of_type(int type)
1081{
1082        struct swap_info_struct *si = swap_type_to_swap_info(type);
1083        pgoff_t offset;
1084
1085        if (!si)
1086                goto fail;
1087
1088        spin_lock(&si->lock);
1089        if (si->flags & SWP_WRITEOK) {
1090                atomic_long_dec(&nr_swap_pages);
 1091                /* This is called for allocating a swap entry, not a swap cache entry */
1092                offset = scan_swap_map(si, 1);
1093                if (offset) {
1094                        spin_unlock(&si->lock);
1095                        return swp_entry(type, offset);
1096                }
1097                atomic_long_inc(&nr_swap_pages);
1098        }
1099        spin_unlock(&si->lock);
1100fail:
1101        return (swp_entry_t) {0};
1102}
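
/*
 * A caller (e.g. the hibernation code) is expected to treat a zero
 * swp_entry_t value as allocation failure.  Illustrative sketch only,
 * with hypothetical error handling:
 *
 *	swp_entry_t entry = get_swap_page_of_type(type);
 *
 *	if (!entry.val)
 *		return -ENOSPC;
 */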
1103
1104static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1105{
1106        struct swap_info_struct *p;
1107        unsigned long offset;
1108
1109        if (!entry.val)
1110                goto out;
1111        p = swp_swap_info(entry);
1112        if (!p)
1113                goto bad_nofile;
1114        if (!(p->flags & SWP_USED))
1115                goto bad_device;
1116        offset = swp_offset(entry);
1117        if (offset >= p->max)
1118                goto bad_offset;
1119        return p;
1120
1121bad_offset:
1122        pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
1123        goto out;
1124bad_device:
1125        pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
1126        goto out;
1127bad_nofile:
1128        pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
1129out:
1130        return NULL;
1131}
1132
1133static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1134{
1135        struct swap_info_struct *p;
1136
1137        p = __swap_info_get(entry);
1138        if (!p)
1139                goto out;
1140        if (!p->swap_map[swp_offset(entry)])
1141                goto bad_free;
1142        return p;
1143
1144bad_free:
1145        pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
1146        goto out;
1147out:
1148        return NULL;
1149}
1150
1151static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1152{
1153        struct swap_info_struct *p;
1154
1155        p = _swap_info_get(entry);
1156        if (p)
1157                spin_lock(&p->lock);
1158        return p;
1159}
1160
1161static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1162                                        struct swap_info_struct *q)
1163{
1164        struct swap_info_struct *p;
1165
1166        p = _swap_info_get(entry);
1167
1168        if (p != q) {
1169                if (q != NULL)
1170                        spin_unlock(&q->lock);
1171                if (p != NULL)
1172                        spin_lock(&p->lock);
1173        }
1174        return p;
1175}
1176
1177static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1178                                              unsigned long offset,
1179                                              unsigned char usage)
1180{
1181        unsigned char count;
1182        unsigned char has_cache;
1183
1184        count = p->swap_map[offset];
1185
1186        has_cache = count & SWAP_HAS_CACHE;
1187        count &= ~SWAP_HAS_CACHE;
1188
1189        if (usage == SWAP_HAS_CACHE) {
1190                VM_BUG_ON(!has_cache);
1191                has_cache = 0;
1192        } else if (count == SWAP_MAP_SHMEM) {
1193                /*
1194                 * Or we could insist on shmem.c using a special
1195                 * swap_shmem_free() and free_shmem_swap_and_cache()...
1196                 */
1197                count = 0;
1198        } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1199                if (count == COUNT_CONTINUED) {
1200                        if (swap_count_continued(p, offset, count))
1201                                count = SWAP_MAP_MAX | COUNT_CONTINUED;
1202                        else
1203                                count = SWAP_MAP_MAX;
1204                } else
1205                        count--;
1206        }
1207
1208        usage = count | has_cache;
1209        p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
1210
1211        return usage;
1212}
1213
1214/*
1215 * Check whether swap entry is valid in the swap device.  If so,
1216 * return pointer to swap_info_struct, and keep the swap entry valid
 1217 * by preventing the swap device from being swapped off, until
1218 * put_swap_device() is called.  Otherwise return NULL.
1219 *
1220 * The entirety of the RCU read critical section must come before the
1221 * return from or after the call to synchronize_rcu() in
1222 * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
 1223 * true, then si->swap_map, si->cluster_info, etc. must be valid in the
1224 * critical section.
1225 *
1226 * Notice that swapoff or swapoff+swapon can still happen before the
1227 * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
1228 * in put_swap_device() if there isn't any other way to prevent
1229 * swapoff, such as page lock, page table lock, etc.  The caller must
1230 * be prepared for that.  For example, the following situation is
1231 * possible.
1232 *
1233 *   CPU1                               CPU2
1234 *   do_swap_page()
1235 *     ...                              swapoff+swapon
1236 *     __read_swap_cache_async()
1237 *       swapcache_prepare()
1238 *         __swap_duplicate()
1239 *           // check swap_map
1240 *     // verify PTE not changed
1241 *
 1242 * In __swap_duplicate(), the swap_map needs to be checked before
1243 * changing partly because the specified swap entry may be for another
 1244 * swap device which has been swapped off.  And in do_swap_page(), after
1245 * the page is read from the swap device, the PTE is verified not
1246 * changed with the page table locked to check whether the swap device
 1247 * has been swapped off, or swapped off and then on again.
1248 */
1249struct swap_info_struct *get_swap_device(swp_entry_t entry)
1250{
1251        struct swap_info_struct *si;
1252        unsigned long offset;
1253
1254        if (!entry.val)
1255                goto out;
1256        si = swp_swap_info(entry);
1257        if (!si)
1258                goto bad_nofile;
1259
1260        rcu_read_lock();
1261        if (!(si->flags & SWP_VALID))
1262                goto unlock_out;
1263        offset = swp_offset(entry);
1264        if (offset >= si->max)
1265                goto unlock_out;
1266
1267        return si;
1268bad_nofile:
1269        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1270out:
1271        return NULL;
1272unlock_out:
1273        rcu_read_unlock();
1274        return NULL;
1275}
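
/*
 * Usage sketch for get_swap_device()/put_swap_device() (compare
 * __swap_count() below): a NULL return means the entry is not valid, and
 * a non-NULL si must be released with put_swap_device() when done.
 * do_something() stands in for the caller's work.
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		// si->swap_map etc. are stable until put_swap_device()
 *		do_something(si, swp_offset(entry));
 *		put_swap_device(si);
 *	}
 */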
1276
1277static unsigned char __swap_entry_free(struct swap_info_struct *p,
1278                                       swp_entry_t entry, unsigned char usage)
1279{
1280        struct swap_cluster_info *ci;
1281        unsigned long offset = swp_offset(entry);
1282
1283        ci = lock_cluster_or_swap_info(p, offset);
1284        usage = __swap_entry_free_locked(p, offset, usage);
1285        unlock_cluster_or_swap_info(p, ci);
1286        if (!usage)
1287                free_swap_slot(entry);
1288
1289        return usage;
1290}
1291
1292static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1293{
1294        struct swap_cluster_info *ci;
1295        unsigned long offset = swp_offset(entry);
1296        unsigned char count;
1297
1298        ci = lock_cluster(p, offset);
1299        count = p->swap_map[offset];
1300        VM_BUG_ON(count != SWAP_HAS_CACHE);
1301        p->swap_map[offset] = 0;
1302        dec_cluster_info_page(p, p->cluster_info, offset);
1303        unlock_cluster(ci);
1304
1305        mem_cgroup_uncharge_swap(entry, 1);
1306        swap_range_free(p, offset, 1);
1307}
1308
1309/*
1310 * Caller has made sure that the swap device corresponding to entry
1311 * is still around or has not been recycled.
1312 */
1313void swap_free(swp_entry_t entry)
1314{
1315        struct swap_info_struct *p;
1316
1317        p = _swap_info_get(entry);
1318        if (p)
1319                __swap_entry_free(p, entry, 1);
1320}
1321
1322/*
 1323 * Called after dropping swapcache to decrease the refcount of swap entries.
1324 */
1325void put_swap_page(struct page *page, swp_entry_t entry)
1326{
1327        unsigned long offset = swp_offset(entry);
1328        unsigned long idx = offset / SWAPFILE_CLUSTER;
1329        struct swap_cluster_info *ci;
1330        struct swap_info_struct *si;
1331        unsigned char *map;
1332        unsigned int i, free_entries = 0;
1333        unsigned char val;
1334        int size = swap_entry_size(hpage_nr_pages(page));
1335
1336        si = _swap_info_get(entry);
1337        if (!si)
1338                return;
1339
1340        ci = lock_cluster_or_swap_info(si, offset);
1341        if (size == SWAPFILE_CLUSTER) {
1342                VM_BUG_ON(!cluster_is_huge(ci));
1343                map = si->swap_map + offset;
1344                for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1345                        val = map[i];
1346                        VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1347                        if (val == SWAP_HAS_CACHE)
1348                                free_entries++;
1349                }
1350                cluster_clear_huge(ci);
1351                if (free_entries == SWAPFILE_CLUSTER) {
1352                        unlock_cluster_or_swap_info(si, ci);
1353                        spin_lock(&si->lock);
1354                        mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1355                        swap_free_cluster(si, idx);
1356                        spin_unlock(&si->lock);
1357                        return;
1358                }
1359        }
1360        for (i = 0; i < size; i++, entry.val++) {
1361                if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1362                        unlock_cluster_or_swap_info(si, ci);
1363                        free_swap_slot(entry);
1364                        if (i == size - 1)
1365                                return;
1366                        lock_cluster_or_swap_info(si, offset);
1367                }
1368        }
1369        unlock_cluster_or_swap_info(si, ci);
1370}
1371
1372#ifdef CONFIG_THP_SWAP
1373int split_swap_cluster(swp_entry_t entry)
1374{
1375        struct swap_info_struct *si;
1376        struct swap_cluster_info *ci;
1377        unsigned long offset = swp_offset(entry);
1378
1379        si = _swap_info_get(entry);
1380        if (!si)
1381                return -EBUSY;
1382        ci = lock_cluster(si, offset);
1383        cluster_clear_huge(ci);
1384        unlock_cluster(ci);
1385        return 0;
1386}
1387#endif
1388
1389static int swp_entry_cmp(const void *ent1, const void *ent2)
1390{
1391        const swp_entry_t *e1 = ent1, *e2 = ent2;
1392
1393        return (int)swp_type(*e1) - (int)swp_type(*e2);
1394}
1395
1396void swapcache_free_entries(swp_entry_t *entries, int n)
1397{
1398        struct swap_info_struct *p, *prev;
1399        int i;
1400
1401        if (n <= 0)
1402                return;
1403
1404        prev = NULL;
1405        p = NULL;
1406
1407        /*
1408         * Sort swap entries by swap device, so each lock is only taken once.
1409         * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1410         * so low that it isn't necessary to optimize further.
1411         */
1412        if (nr_swapfiles > 1)
1413                sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1414        for (i = 0; i < n; ++i) {
1415                p = swap_info_get_cont(entries[i], prev);
1416                if (p)
1417                        swap_entry_free(p, entries[i]);
1418                prev = p;
1419        }
1420        if (p)
1421                spin_unlock(&p->lock);
1422}
1423
1424/*
1425 * How many references to page are currently swapped out?
1426 * This does not give an exact answer when swap count is continued,
1427 * but does include the high COUNT_CONTINUED flag to allow for that.
1428 */
1429int page_swapcount(struct page *page)
1430{
1431        int count = 0;
1432        struct swap_info_struct *p;
1433        struct swap_cluster_info *ci;
1434        swp_entry_t entry;
1435        unsigned long offset;
1436
1437        entry.val = page_private(page);
1438        p = _swap_info_get(entry);
1439        if (p) {
1440                offset = swp_offset(entry);
1441                ci = lock_cluster_or_swap_info(p, offset);
1442                count = swap_count(p->swap_map[offset]);
1443                unlock_cluster_or_swap_info(p, ci);
1444        }
1445        return count;
1446}
1447
1448int __swap_count(swp_entry_t entry)
1449{
1450        struct swap_info_struct *si;
1451        pgoff_t offset = swp_offset(entry);
1452        int count = 0;
1453
1454        si = get_swap_device(entry);
1455        if (si) {
1456                count = swap_count(si->swap_map[offset]);
1457                put_swap_device(si);
1458        }
1459        return count;
1460}
1461
1462static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1463{
1464        int count = 0;
1465        pgoff_t offset = swp_offset(entry);
1466        struct swap_cluster_info *ci;
1467
1468        ci = lock_cluster_or_swap_info(si, offset);
1469        count = swap_count(si->swap_map[offset]);
1470        unlock_cluster_or_swap_info(si, ci);
1471        return count;
1472}
1473
1474/*
1475 * How many references to @entry are currently swapped out?
1476 * This does not give an exact answer when swap count is continued,
1477 * but does include the high COUNT_CONTINUED flag to allow for that.
1478 */
1479int __swp_swapcount(swp_entry_t entry)
1480{
1481        int count = 0;
1482        struct swap_info_struct *si;
1483
1484        si = get_swap_device(entry);
1485        if (si) {
1486                count = swap_swapcount(si, entry);
1487                put_swap_device(si);
1488        }
1489        return count;
1490}
1491
1492/*
1493 * How many references to @entry are currently swapped out?
1494 * This considers COUNT_CONTINUED so it returns exact answer.
1495 */
1496int swp_swapcount(swp_entry_t entry)
1497{
1498        int count, tmp_count, n;
1499        struct swap_info_struct *p;
1500        struct swap_cluster_info *ci;
1501        struct page *page;
1502        pgoff_t offset;
1503        unsigned char *map;
1504
1505        p = _swap_info_get(entry);
1506        if (!p)
1507                return 0;
1508
1509        offset = swp_offset(entry);
1510
1511        ci = lock_cluster_or_swap_info(p, offset);
1512
1513        count = swap_count(p->swap_map[offset]);
1514        if (!(count & COUNT_CONTINUED))
1515                goto out;
1516
1517        count &= ~COUNT_CONTINUED;
1518        n = SWAP_MAP_MAX + 1;
1519
1520        page = vmalloc_to_page(p->swap_map + offset);
1521        offset &= ~PAGE_MASK;
1522        VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1523
1524        do {
1525                page = list_next_entry(page, lru);
1526                map = kmap_atomic(page);
1527                tmp_count = map[offset];
1528                kunmap_atomic(map);
1529
1530                count += (tmp_count & ~COUNT_CONTINUED) * n;
1531                n *= (SWAP_CONT_MAX + 1);
1532        } while (tmp_count & COUNT_CONTINUED);
1533out:
1534        unlock_cluster_or_swap_info(p, ci);
1535        return count;
1536}
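
/*
 * The loop above treats the continuation pages as higher-order digits of a
 * mixed-radix number, so the exact count works out to (informal sketch):
 *
 *	count = low
 *	      + c1 * (SWAP_MAP_MAX + 1)
 *	      + c2 * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)
 *	      + ...
 *
 * where "low" is the in-place count from swap_map and c1, c2, ... are the
 * per-offset bytes read from successive continuation pages.
 */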
1537
1538static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1539                                         swp_entry_t entry)
1540{
1541        struct swap_cluster_info *ci;
1542        unsigned char *map = si->swap_map;
1543        unsigned long roffset = swp_offset(entry);
1544        unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1545        int i;
1546        bool ret = false;
1547
1548        ci = lock_cluster_or_swap_info(si, offset);
1549        if (!ci || !cluster_is_huge(ci)) {
1550                if (swap_count(map[roffset]))
1551                        ret = true;
1552                goto unlock_out;
1553        }
1554        for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1555                if (swap_count(map[offset + i])) {
1556                        ret = true;
1557                        break;
1558                }
1559        }
1560unlock_out:
1561        unlock_cluster_or_swap_info(si, ci);
1562        return ret;
1563}
1564
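/*
 * Return true if the page still has swap references; for a THP in swap
 * cache this checks every swap entry backing the compound page.
 */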
1565static bool page_swapped(struct page *page)
1566{
1567        swp_entry_t entry;
1568        struct swap_info_struct *si;
1569
1570        if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1571                return page_swapcount(page) != 0;
1572
1573        page = compound_head(page);
1574        entry.val = page_private(page);
1575        si = _swap_info_get(entry);
1576        if (si)
1577                return swap_page_trans_huge_swapped(si, entry);
1578        return false;
1579}
1580
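/*
 * Return the maximum (map count + swap count) over the subpages of @page,
 * including its compound mapcount for a THP, and optionally report the
 * summed counts through @total_mapcount and @total_swapcount.
 */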
1581static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1582                                         int *total_swapcount)
1583{
1584        int i, map_swapcount, _total_mapcount, _total_swapcount;
1585        unsigned long offset = 0;
1586        struct swap_info_struct *si;
1587        struct swap_cluster_info *ci = NULL;
1588        unsigned char *map = NULL;
1589        int mapcount, swapcount = 0;
1590
1591        /* hugetlbfs shouldn't call it */
1592        VM_BUG_ON_PAGE(PageHuge(page), page);
1593
1594        if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1595                mapcount = page_trans_huge_mapcount(page, total_mapcount);
1596                if (PageSwapCache(page))
1597                        swapcount = page_swapcount(page);
1598                if (total_swapcount)
1599                        *total_swapcount = swapcount;
1600                return mapcount + swapcount;
1601        }
1602
1603        page = compound_head(page);
1604
1605        _total_mapcount = _total_swapcount = map_swapcount = 0;
1606        if (PageSwapCache(page)) {
1607                swp_entry_t entry;
1608
1609                entry.val = page_private(page);
1610                si = _swap_info_get(entry);
1611                if (si) {
1612                        map = si->swap_map;
1613                        offset = swp_offset(entry);
1614                }
1615        }
1616        if (map)
1617                ci = lock_cluster(si, offset);
1618        for (i = 0; i < HPAGE_PMD_NR; i++) {
1619                mapcount = atomic_read(&page[i]._mapcount) + 1;
1620                _total_mapcount += mapcount;
1621                if (map) {
1622                        swapcount = swap_count(map[offset + i]);
1623                        _total_swapcount += swapcount;
1624                }
1625                map_swapcount = max(map_swapcount, mapcount + swapcount);
1626        }
1627        unlock_cluster(ci);
1628        if (PageDoubleMap(page)) {
1629                map_swapcount -= 1;
1630                _total_mapcount -= HPAGE_PMD_NR;
1631        }
1632        mapcount = compound_mapcount(page);
1633        map_swapcount += mapcount;
1634        _total_mapcount += mapcount;
1635        if (total_mapcount)
1636                *total_mapcount = _total_mapcount;
1637        if (total_swapcount)
1638                *total_swapcount = _total_swapcount;
1639
1640        return map_swapcount;
1641}
1642
1643/*
1644 * We can write to an anon page without COW if there are no other references
1645 * to it.  And as a side-effect, free up its swap: because the old content
1646 * on disk will never be read, and seeking back there to write new content
1647 * later would only waste time away from clustering.
1648 *
1649 * NOTE: total_map_swapcount should not be relied upon by the caller if
1650 * reuse_swap_page() returns false, but it may always be overwritten
1651 * (see the other implementation for CONFIG_SWAP=n).
1652 */
1653bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1654{
1655        int count, total_mapcount, total_swapcount;
1656
1657        VM_BUG_ON_PAGE(!PageLocked(page), page);
1658        if (unlikely(PageKsm(page)))
1659                return false;
1660        count = page_trans_huge_map_swapcount(page, &total_mapcount,
1661                                              &total_swapcount);
1662        if (total_map_swapcount)
1663                *total_map_swapcount = total_mapcount + total_swapcount;
1664        if (count == 1 && PageSwapCache(page) &&
1665            (likely(!PageTransCompound(page)) ||
1666             /* The remaining swap count will be freed soon */
1667             total_swapcount == page_swapcount(page))) {
1668                if (!PageWriteback(page)) {
1669                        page = compound_head(page);
1670                        delete_from_swap_cache(page);
1671                        SetPageDirty(page);
1672                } else {
1673                        swp_entry_t entry;
1674                        struct swap_info_struct *p;
1675
1676                        entry.val = page_private(page);
1677                        p = swap_info_get(entry);
1678                        if (p->flags & SWP_STABLE_WRITES) {
1679                                spin_unlock(&p->lock);
1680                                return false;
1681                        }
1682                        spin_unlock(&p->lock);
1683                }
1684        }
1685
1686        return count <= 1;
1687}
1688
1689/*
1690 * If swap is getting full, or if there are no more mappings of this page,
1691 * then try_to_free_swap is called to free its swap space.
1692 */
1693int try_to_free_swap(struct page *page)
1694{
1695        VM_BUG_ON_PAGE(!PageLocked(page), page);
1696
1697        if (!PageSwapCache(page))
1698                return 0;
1699        if (PageWriteback(page))
1700                return 0;
1701        if (page_swapped(page))
1702                return 0;
1703
1704        /*
1705         * Once hibernation has begun to create its image of memory,
1706         * there's a danger that one of the calls to try_to_free_swap()
1707         * - most probably a call from __try_to_reclaim_swap() while
1708         * hibernation is allocating its own swap pages for the image,
1709         * but conceivably even a call from memory reclaim - will free
1710         * the swap from a page which has already been recorded in the
1711         * image as a clean swapcache page, and then reuse its swap for
1712         * another page of the image.  On waking from hibernation, the
1713         * original page might be freed under memory pressure, then
1714         * later read back in from swap, now with the wrong data.
1715         *
1716         * Hibernation suspends storage while it is writing the image
1717         * to disk so check that here.
1718         */
1719        if (pm_suspended_storage())
1720                return 0;
1721
1722        page = compound_head(page);
1723        delete_from_swap_cache(page);
1724        SetPageDirty(page);
1725        return 1;
1726}
1727
1728/*
1729 * Free the swap entry like above, but also try to
1730 * free the page cache entry if it is the last user.
1731 */
1732int free_swap_and_cache(swp_entry_t entry)
1733{
1734        struct swap_info_struct *p;
1735        unsigned char count;
1736
1737        if (non_swap_entry(entry))
1738                return 1;
1739
1740        p = _swap_info_get(entry);
1741        if (p) {
1742                count = __swap_entry_free(p, entry, 1);
1743                if (count == SWAP_HAS_CACHE &&
1744                    !swap_page_trans_huge_swapped(p, entry))
1745                        __try_to_reclaim_swap(p, swp_offset(entry),
1746                                              TTRS_UNMAPPED | TTRS_FULL);
1747        }
1748        return p != NULL;
1749}
1750
1751#ifdef CONFIG_HIBERNATION
1752/*
1753 * Find the swap type that corresponds to given device (if any).
1754 *
1755 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1756 * from 0, in which the swap header is expected to be located.
1757 *
1758 * This is needed for the suspend to disk (aka swsusp).
1759 */
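/*
 * A zero @device means "any swap device": the first writable swap type is
 * returned (and, when @bdev_p is set, a reference to its bdev is grabbed).
 */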
1760int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1761{
1762        struct block_device *bdev = NULL;
1763        int type;
1764
1765        if (device)
1766                bdev = bdget(device);
1767
1768        spin_lock(&swap_lock);
1769        for (type = 0; type < nr_swapfiles; type++) {
1770                struct swap_info_struct *sis = swap_info[type];
1771
1772                if (!(sis->flags & SWP_WRITEOK))
1773                        continue;
1774
1775                if (!bdev) {
1776                        if (bdev_p)
1777                                *bdev_p = bdgrab(sis->bdev);
1778
1779                        spin_unlock(&swap_lock);
1780                        return type;
1781                }
1782                if (bdev == sis->bdev) {
1783                        struct swap_extent *se = first_se(sis);
1784
1785                        if (se->start_block == offset) {
1786                                if (bdev_p)
1787                                        *bdev_p = bdgrab(sis->bdev);
1788
1789                                spin_unlock(&swap_lock);
1790                                bdput(bdev);
1791                                return type;
1792                        }
1793                }
1794        }
1795        spin_unlock(&swap_lock);
1796        if (bdev)
1797                bdput(bdev);
1798
1799        return -ENODEV;
1800}
1801
1802/*
1803 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1804 * corresponding to given index in swap_info (swap type).
1805 */
1806sector_t swapdev_block(int type, pgoff_t offset)
1807{
1808        struct block_device *bdev;
1809        struct swap_info_struct *si = swap_type_to_swap_info(type);
1810
1811        if (!si || !(si->flags & SWP_WRITEOK))
1812                return 0;
1813        return map_swap_entry(swp_entry(type, offset), &bdev);
1814}
1815
1816/*
1817 * Return either the total number of swap pages of given type, or the number
1818 * of free pages of that type (depending on @free)
1819 *
1820 * This is needed for software suspend
1821 */
1822unsigned int count_swap_pages(int type, int free)
1823{
1824        unsigned int n = 0;
1825
1826        spin_lock(&swap_lock);
1827        if ((unsigned int)type < nr_swapfiles) {
1828                struct swap_info_struct *sis = swap_info[type];
1829
1830                spin_lock(&sis->lock);
1831                if (sis->flags & SWP_WRITEOK) {
1832                        n = sis->pages;
1833                        if (free)
1834                                n -= sis->inuse_pages;
1835                }
1836                spin_unlock(&sis->lock);
1837        }
1838        spin_unlock(&swap_lock);
1839        return n;
1840}
1841#endif /* CONFIG_HIBERNATION */
1842
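/* Compare a pte with a swap pte, ignoring the soft-dirty bit of @pte. */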
1843static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1844{
1845        return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1846}
1847
1848/*
1849 * No need to decide whether this PTE shares the swap entry with others,
1850 * just let do_wp_page work it out if a write is requested later - to
1851 * force COW, vm_page_prot omits write permission from any private vma.
1852 */
1853static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1854                unsigned long addr, swp_entry_t entry, struct page *page)
1855{
1856        struct page *swapcache;
1857        struct mem_cgroup *memcg;
1858        spinlock_t *ptl;
1859        pte_t *pte;
1860        int ret = 1;
1861
1862        swapcache = page;
1863        page = ksm_might_need_to_copy(page, vma, addr);
1864        if (unlikely(!page))
1865                return -ENOMEM;
1866
1867        if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1868                                &memcg, false)) {
1869                ret = -ENOMEM;
1870                goto out_nolock;
1871        }
1872
1873        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1874        if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1875                mem_cgroup_cancel_charge(page, memcg, false);
1876                ret = 0;
1877                goto out;
1878        }
1879
1880        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1881        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1882        get_page(page);
1883        set_pte_at(vma->vm_mm, addr, pte,
1884                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1885        if (page == swapcache) {
1886                page_add_anon_rmap(page, vma, addr, false);
1887                mem_cgroup_commit_charge(page, memcg, true, false);
1888        } else { /* ksm created a completely new copy */
1889                page_add_new_anon_rmap(page, vma, addr, false);
1890                mem_cgroup_commit_charge(page, memcg, false, false);
1891                lru_cache_add_active_or_unevictable(page, vma);
1892        }
1893        swap_free(entry);
1894        /*
1895         * Move the page to the active list so it is not
1896         * immediately swapped out again after swapon.
1897         */
1898        activate_page(page);
1899out:
1900        pte_unmap_unlock(pte, ptl);
1901out_nolock:
1902        if (page != swapcache) {
1903                unlock_page(page);
1904                put_page(page);
1905        }
1906        return ret;
1907}
1908
1909static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1910                        unsigned long addr, unsigned long end,
1911                        unsigned int type, bool frontswap,
1912                        unsigned long *fs_pages_to_unuse)
1913{
1914        struct page *page;
1915        swp_entry_t entry;
1916        pte_t *pte;
1917        struct swap_info_struct *si;
1918        unsigned long offset;
1919        int ret = 0;
1920        volatile unsigned char *swap_map;
1921
1922        si = swap_info[type];
1923        pte = pte_offset_map(pmd, addr);
1924        do {
1925                struct vm_fault vmf;
1926
1927                if (!is_swap_pte(*pte))
1928                        continue;
1929
1930                entry = pte_to_swp_entry(*pte);
1931                if (swp_type(entry) != type)
1932                        continue;
1933
1934                offset = swp_offset(entry);
1935                if (frontswap && !frontswap_test(si, offset))
1936                        continue;
1937
1938                pte_unmap(pte);
1939                swap_map = &si->swap_map[offset];
1940                vmf.vma = vma;
1941                vmf.address = addr;
1942                vmf.pmd = pmd;
1943                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
1944                if (!page) {
1945                        if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1946                                goto try_next;
1947                        return -ENOMEM;
1948                }
1949
1950                lock_page(page);
1951                wait_on_page_writeback(page);
1952                ret = unuse_pte(vma, pmd, addr, entry, page);
1953                if (ret < 0) {
1954                        unlock_page(page);
1955                        put_page(page);
1956                        goto out;
1957                }
1958
1959                try_to_free_swap(page);
1960                unlock_page(page);
1961                put_page(page);
1962
1963                if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1964                        ret = FRONTSWAP_PAGES_UNUSED;
1965                        goto out;
1966                }
1967try_next:
1968                pte = pte_offset_map(pmd, addr);
1969        } while (pte++, addr += PAGE_SIZE, addr != end);
1970        pte_unmap(pte - 1);
1971
1972        ret = 0;
1973out:
1974        return ret;
1975}
1976
1977static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1978                                unsigned long addr, unsigned long end,
1979                                unsigned int type, bool frontswap,
1980                                unsigned long *fs_pages_to_unuse)
1981{
1982        pmd_t *pmd;
1983        unsigned long next;
1984        int ret;
1985
1986        pmd = pmd_offset(pud, addr);
1987        do {
1988                cond_resched();
1989                next = pmd_addr_end(addr, end);
1990                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1991                        continue;
1992                ret = unuse_pte_range(vma, pmd, addr, next, type,
1993                                      frontswap, fs_pages_to_unuse);
1994                if (ret)
1995                        return ret;
1996        } while (pmd++, addr = next, addr != end);
1997        return 0;
1998}
1999
2000static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2001                                unsigned long addr, unsigned long end,
2002                                unsigned int type, bool frontswap,
2003                                unsigned long *fs_pages_to_unuse)
2004{
2005        pud_t *pud;
2006        unsigned long next;
2007        int ret;
2008
2009        pud = pud_offset(p4d, addr);
2010        do {
2011                next = pud_addr_end(addr, end);
2012                if (pud_none_or_clear_bad(pud))
2013                        continue;
2014                ret = unuse_pmd_range(vma, pud, addr, next, type,
2015                                      frontswap, fs_pages_to_unuse);
2016                if (ret)
2017                        return ret;
2018        } while (pud++, addr = next, addr != end);
2019        return 0;
2020}
2021
2022static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2023                                unsigned long addr, unsigned long end,
2024                                unsigned int type, bool frontswap,
2025                                unsigned long *fs_pages_to_unuse)
2026{
2027        p4d_t *p4d;
2028        unsigned long next;
2029        int ret;
2030
2031        p4d = p4d_offset(pgd, addr);
2032        do {
2033                next = p4d_addr_end(addr, end);
2034                if (p4d_none_or_clear_bad(p4d))
2035                        continue;
2036                ret = unuse_pud_range(vma, p4d, addr, next, type,
2037                                      frontswap, fs_pages_to_unuse);
2038                if (ret)
2039                        return ret;
2040        } while (p4d++, addr = next, addr != end);
2041        return 0;
2042}
2043
2044static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2045                     bool frontswap, unsigned long *fs_pages_to_unuse)
2046{
2047        pgd_t *pgd;
2048        unsigned long addr, end, next;
2049        int ret;
2050
2051        addr = vma->vm_start;
2052        end = vma->vm_end;
2053
2054        pgd = pgd_offset(vma->vm_mm, addr);
2055        do {
2056                next = pgd_addr_end(addr, end);
2057                if (pgd_none_or_clear_bad(pgd))
2058                        continue;
2059                ret = unuse_p4d_range(vma, pgd, addr, next, type,
2060                                      frontswap, fs_pages_to_unuse);
2061                if (ret)
2062                        return ret;
2063        } while (pgd++, addr = next, addr != end);
2064        return 0;
2065}
2066
2067static int unuse_mm(struct mm_struct *mm, unsigned int type,
2068                    bool frontswap, unsigned long *fs_pages_to_unuse)
2069{
2070        struct vm_area_struct *vma;
2071        int ret = 0;
2072
2073        down_read(&mm->mmap_sem);
2074        for (vma = mm->mmap; vma; vma = vma->vm_next) {
2075                if (vma->anon_vma) {
2076                        ret = unuse_vma(vma, type, frontswap,
2077                                        fs_pages_to_unuse);
2078                        if (ret)
2079                                break;
2080                }
2081                cond_resched();
2082        }
2083        up_read(&mm->mmap_sem);
2084        return ret;
2085}
2086
2087/*
2088 * Scan swap_map (or frontswap_map if frontswap parameter is true)
2089 * from current position to next entry still in use. Return 0
2090 * if there are no inuse entries after prev till end of the map.
2091 */
2092static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2093                                        unsigned int prev, bool frontswap)
2094{
2095        unsigned int i;
2096        unsigned char count;
2097
2098        /*
2099         * No need for swap_lock here: we're just looking
2100         * for whether an entry is in use, not modifying it; false
2101         * hits are okay, and sys_swapoff() has already prevented new
2102         * allocations from this area (while holding swap_lock).
2103         */
2104        for (i = prev + 1; i < si->max; i++) {
2105                count = READ_ONCE(si->swap_map[i]);
2106                if (count && swap_count(count) != SWAP_MAP_BAD)
2107                        if (!frontswap || frontswap_test(si, i))
2108                                break;
2109                if ((i % LATENCY_LIMIT) == 0)
2110                        cond_resched();
2111        }
2112
2113        if (i == si->max)
2114                i = 0;
2115
2116        return i;
2117}
2118
2119/*
2120 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2121 * pages_to_unuse==0 means all pages; ignored if frontswap is false
2122 */
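/*
 * try_to_unuse() works in passes: shmem_unuse() handles tmpfs/shmem swap
 * entries first, then every mm on init_mm.mmlist is walked via unuse_mm(),
 * and finally pages still sitting in the swap cache are reclaimed.  Because
 * entries can be re-added under memory pressure, the whole sequence is
 * retried until the device is empty or a signal is pending.
 */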
2123int try_to_unuse(unsigned int type, bool frontswap,
2124                 unsigned long pages_to_unuse)
2125{
2126        struct mm_struct *prev_mm;
2127        struct mm_struct *mm;
2128        struct list_head *p;
2129        int retval = 0;
2130        struct swap_info_struct *si = swap_info[type];
2131        struct page *page;
2132        swp_entry_t entry;
2133        unsigned int i;
2134
2135        if (!si->inuse_pages)
2136                return 0;
2137
2138        if (!frontswap)
2139                pages_to_unuse = 0;
2140
2141retry:
2142        retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2143        if (retval)
2144                goto out;
2145
2146        prev_mm = &init_mm;
2147        mmget(prev_mm);
2148
2149        spin_lock(&mmlist_lock);
2150        p = &init_mm.mmlist;
2151        while (si->inuse_pages &&
2152               !signal_pending(current) &&
2153               (p = p->next) != &init_mm.mmlist) {
2154
2155                mm = list_entry(p, struct mm_struct, mmlist);
2156                if (!mmget_not_zero(mm))
2157                        continue;
2158                spin_unlock(&mmlist_lock);
2159                mmput(prev_mm);
2160                prev_mm = mm;
2161                retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2162
2163                if (retval) {
2164                        mmput(prev_mm);
2165                        goto out;
2166                }
2167
2168                /*
2169                 * Make sure that we aren't completely killing
2170                 * interactive performance.
2171                 */
2172                cond_resched();
2173                spin_lock(&mmlist_lock);
2174        }
2175        spin_unlock(&mmlist_lock);
2176
2177        mmput(prev_mm);
2178
2179        i = 0;
2180        while (si->inuse_pages &&
2181               !signal_pending(current) &&
2182               (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2183
2184                entry = swp_entry(type, i);
2185                page = find_get_page(swap_address_space(entry), i);
2186                if (!page)
2187                        continue;
2188
2189                /*
2190                 * It is conceivable that a racing task removed this page from
2191                 * swap cache just before we acquired the page lock. The page
2192                 * might even be back in swap cache on another swap area. But
2193                 * that is okay, try_to_free_swap() only removes stale pages.
2194                 */
2195                lock_page(page);
2196                wait_on_page_writeback(page);
2197                try_to_free_swap(page);
2198                unlock_page(page);
2199                put_page(page);
2200
2201                /*
2202                 * For frontswap, we only need to unuse pages_to_unuse, if
2203                 * it was specified.  There is no need to check frontswap again
2204                 * here because pages_to_unuse was zeroed above when !frontswap.
2205                 */
2206                if (pages_to_unuse && --pages_to_unuse == 0)
2207                        goto out;
2208        }
2209
2210        /*
2211 * Let's check again to see if there are still swap entries in the map.
2212 * If so, we need to retry the unuse logic.
2213         * Under global memory pressure, swap entries can be reinserted back
2214         * into process space after the mmlist loop above passes over them.
2215         *
2216         * Limit the number of retries? No: when mmget_not_zero() above fails,
2217         * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2218         * at its own independent pace; and even shmem_writepage() could have
2219         * been preempted after get_swap_page(), temporarily hiding that swap.
2220         * It's easy and robust (though cpu-intensive) just to keep retrying.
2221         */
2222        if (si->inuse_pages) {
2223                if (!signal_pending(current))
2224                        goto retry;
2225                retval = -EINTR;
2226        }
2227out:
2228        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2229}
2230
2231/*
2232 * After a successful try_to_unuse, if no swap is now in use, we know
2233 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2234 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2235 * added to the mmlist just after page_duplicate - before would be racy.
2236 */
2237static void drain_mmlist(void)
2238{
2239        struct list_head *p, *next;
2240        unsigned int type;
2241
2242        for (type = 0; type < nr_swapfiles; type++)
2243                if (swap_info[type]->inuse_pages)
2244                        return;
2245        spin_lock(&mmlist_lock);
2246        list_for_each_safe(p, next, &init_mm.mmlist)
2247                list_del_init(p);
2248        spin_unlock(&mmlist_lock);
2249}
2250
2251/*
2252 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
2253 * corresponds to page offset for the specified swap entry.
2254 * Note that the type of this function is sector_t, but it returns page offset
2255 * into the bdev, not sector offset.
2256 */
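/*
 * For example (illustrative numbers): with an extent of start_page == 100
 * and start_block == 5000, a swap offset of 130 maps to block 5030, i.e. a
 * PAGE_SIZE-sized block number, not a 512-byte sector.
 */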
2257static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
2258{
2259        struct swap_info_struct *sis;
2260        struct swap_extent *se;
2261        pgoff_t offset;
2262
2263        sis = swp_swap_info(entry);
2264        *bdev = sis->bdev;
2265
2266        offset = swp_offset(entry);
2267        se = offset_to_swap_extent(sis, offset);
2268        return se->start_block + (offset - se->start_page);
2269}
2270
2271/*
2272 * Returns the page offset into bdev for the specified page's swap entry.
2273 */
2274sector_t map_swap_page(struct page *page, struct block_device **bdev)
2275{
2276        swp_entry_t entry;
2277        entry.val = page_private(page);
2278        return map_swap_entry(entry, bdev);
2279}
2280
2281/*
2282 * Free all of a swapdev's extent information
2283 */
2284static void destroy_swap_extents(struct swap_info_struct *sis)
2285{
2286        while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2287                struct rb_node *rb = sis->swap_extent_root.rb_node;
2288                struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2289
2290                rb_erase(rb, &sis->swap_extent_root);
2291                kfree(se);
2292        }
2293
2294        if (sis->flags & SWP_ACTIVATED) {
2295                struct file *swap_file = sis->swap_file;
2296                struct address_space *mapping = swap_file->f_mapping;
2297
2298                sis->flags &= ~SWP_ACTIVATED;
2299                if (mapping->a_ops->swap_deactivate)
2300                        mapping->a_ops->swap_deactivate(swap_file);
2301        }
2302}
2303
2304/*
2305 * Add a block range (and the corresponding page range) into this swapdev's
2306 * extent tree.
2307 *
2308 * This function rather assumes that it is called in ascending page order.
2309 */
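/*
 * For example (illustrative numbers): add_swap_extent(sis, 0, 256, 1000)
 * followed by add_swap_extent(sis, 256, 256, 1256) merges into one
 * 512-page extent, while a discontiguous start_block would instead insert
 * a new rightmost node.
 */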
2310int
2311add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2312                unsigned long nr_pages, sector_t start_block)
2313{
2314        struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2315        struct swap_extent *se;
2316        struct swap_extent *new_se;
2317
2318        /*
2319         * Place the new node at the rightmost position, since this
2320         * function is called in ascending page order.
2321         */
2322        while (*link) {
2323                parent = *link;
2324                link = &parent->rb_right;
2325        }
2326
2327        if (parent) {
2328                se = rb_entry(parent, struct swap_extent, rb_node);
2329                BUG_ON(se->start_page + se->nr_pages != start_page);
2330                if (se->start_block + se->nr_pages == start_block) {
2331                        /* Merge it */
2332                        se->nr_pages += nr_pages;
2333                        return 0;
2334                }
2335        }
2336
2337        /* No merge, insert a new extent. */
2338        new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2339        if (new_se == NULL)
2340                return -ENOMEM;
2341        new_se->start_page = start_page;
2342        new_se->nr_pages = nr_pages;
2343        new_se->start_block = start_block;
2344
2345        rb_link_node(&new_se->rb_node, parent, link);
2346        rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2347        return 1;
2348}
2349EXPORT_SYMBOL_GPL(add_swap_extent);
2350
2351/*
2352 * A `swap extent' is a simple thing which maps a contiguous range of pages
2353 * onto a contiguous range of disk blocks.  An ordered list of swap extents
2354 * is built at swapon time and is then used at swap_writepage/swap_readpage
2355 * time for locating where on disk a page belongs.
2356 *
2357 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2358 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2359 * swap files identically.
2360 *
2361 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2362 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2363 * swapfiles are handled *identically* after swapon time.
2364 *
2365 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2366 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2367 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2368 * requirements, they are simply tossed out - we will never use those blocks
2369 * for swapping.
2370 *
2371 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2372 * prevents users from writing to the swap device, which will corrupt memory.
2373 *
2374 * The amount of disk space which a single swap extent represents varies.
2375 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2376 * extents.  To avoid much walking, the extents are kept in an rb-tree
2377 * indexed by start_page (see offset_to_swap_extent()), so map_swap_entry()
2378 * can find the extent for any page offset in logarithmic time.
2379 * (The old cached-list scheme averaged about 0.3 iterations per page. - akpm)
2380 */
2381static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2382{
2383        struct file *swap_file = sis->swap_file;
2384        struct address_space *mapping = swap_file->f_mapping;
2385        struct inode *inode = mapping->host;
2386        int ret;
2387
2388        if (S_ISBLK(inode->i_mode)) {
2389                ret = add_swap_extent(sis, 0, sis->max, 0);
2390                *span = sis->pages;
2391                return ret;
2392        }
2393
2394        if (mapping->a_ops->swap_activate) {
2395                ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2396                if (ret >= 0)
2397                        sis->flags |= SWP_ACTIVATED;
2398                if (!ret) {
2399                        sis->flags |= SWP_FS;
2400                        ret = add_swap_extent(sis, 0, sis->max, 0);
2401                        *span = sis->pages;
2402                }
2403                return ret;
2404        }
2405
2406        return generic_swapfile_activate(sis, swap_file, span);
2407}
2408
2409static int swap_node(struct swap_info_struct *p)
2410{
2411        struct block_device *bdev;
2412
2413        if (p->bdev)
2414                bdev = p->bdev;
2415        else
2416                bdev = p->swap_file->f_inode->i_sb->s_bdev;
2417
2418        return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2419}
2420
2421static void setup_swap_info(struct swap_info_struct *p, int prio,
2422                            unsigned char *swap_map,
2423                            struct swap_cluster_info *cluster_info)
2424{
2425        int i;
2426
2427        if (prio >= 0)
2428                p->prio = prio;
2429        else
2430                p->prio = --least_priority;
2431        /*
2432         * the plist prio is negated because plist ordering is
2433         * low-to-high, while swap ordering is high-to-low
2434         */
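        /* e.g. a swap priority of 5 is stored as plist prio -5, and -2 as 2 */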
2435        p->list.prio = -p->prio;
2436        for_each_node(i) {
2437                if (p->prio >= 0)
2438                        p->avail_lists[i].prio = -p->prio;
2439                else {
2440                        if (swap_node(p) == i)
2441                                p->avail_lists[i].prio = 1;
2442                        else
2443                                p->avail_lists[i].prio = -p->prio;
2444                }
2445        }
2446        p->swap_map = swap_map;
2447        p->cluster_info = cluster_info;
2448}
2449
2450static void _enable_swap_info(struct swap_info_struct *p)
2451{
2452        p->flags |= SWP_WRITEOK | SWP_VALID;
2453        atomic_long_add(p->pages, &nr_swap_pages);
2454        total_swap_pages += p->pages;
2455
2456        assert_spin_locked(&swap_lock);
2457        /*
2458         * both lists are plists, and thus priority ordered.
2459         * swap_active_head needs to be priority ordered for swapoff(),
2460         * which on removal of any swap_info_struct with an auto-assigned
2461         * (i.e. negative) priority increments the auto-assigned priority
2462         * of any lower-priority swap_info_structs.
2463         * swap_avail_head needs to be priority ordered for get_swap_page(),
2464         * which allocates swap pages from the highest available priority
2465         * swap_info_struct.
2466         */
2467        plist_add(&p->list, &swap_active_head);
2468        add_to_avail_list(p);
2469}
2470
2471static void enable_swap_info(struct swap_info_struct *p, int prio,
2472                                unsigned char *swap_map,
2473                                struct swap_cluster_info *cluster_info,
2474                                unsigned long *frontswap_map)
2475{
2476        frontswap_init(p->type, frontswap_map);
2477        spin_lock(&swap_lock);
2478        spin_lock(&p->lock);
2479        setup_swap_info(p, prio, swap_map, cluster_info);
2480        spin_unlock(&p->lock);
2481        spin_unlock(&swap_lock);
2482        /*
2483         * Guarantee swap_map, cluster_info, etc. fields are valid
2484         * between get/put_swap_device() if SWP_VALID bit is set
2485         */
2486        synchronize_rcu();
2487        spin_lock(&swap_lock);
2488        spin_lock(&p->lock);
2489        _enable_swap_info(p);
2490        spin_unlock(&p->lock);
2491        spin_unlock(&swap_lock);
2492}
2493
2494static void reinsert_swap_info(struct swap_info_struct *p)
2495{
2496        spin_lock(&swap_lock);
2497        spin_lock(&p->lock);
2498        setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2499        _enable_swap_info(p);
2500        spin_unlock(&p->lock);
2501        spin_unlock(&swap_lock);
2502}
2503
2504bool has_usable_swap(void)
2505{
2506        bool ret = true;
2507
2508        spin_lock(&swap_lock);
2509        if (plist_head_empty(&swap_active_head))
2510                ret = false;
2511        spin_unlock(&swap_lock);
2512        return ret;
2513}
2514
2515SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2516{
2517        struct swap_info_struct *p = NULL;
2518        unsigned char *swap_map;
2519        struct swap_cluster_info *cluster_info;
2520        unsigned long *frontswap_map;
2521        struct file *swap_file, *victim;
2522        struct address_space *mapping;
2523        struct inode *inode;
2524        struct filename *pathname;
2525        int err, found = 0;
2526        unsigned int old_block_size;
2527
2528        if (!capable(CAP_SYS_ADMIN))
2529                return -EPERM;
2530
2531        BUG_ON(!current->mm);
2532
2533        pathname = getname(specialfile);
2534        if (IS_ERR(pathname))
2535                return PTR_ERR(pathname);
2536
2537        victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2538        err = PTR_ERR(victim);
2539        if (IS_ERR(victim))
2540                goto out;
2541
2542        mapping = victim->f_mapping;
2543        spin_lock(&swap_lock);
2544        plist_for_each_entry(p, &swap_active_head, list) {
2545                if (p->flags & SWP_WRITEOK) {
2546                        if (p->swap_file->f_mapping == mapping) {
2547                                found = 1;
2548                                break;
2549                        }
2550                }
2551        }
2552        if (!found) {
2553                err = -EINVAL;
2554                spin_unlock(&swap_lock);
2555                goto out_dput;
2556        }
2557        if (!security_vm_enough_memory_mm(current->mm, p->pages))
2558                vm_unacct_memory(p->pages);
2559        else {
2560                err = -ENOMEM;
2561                spin_unlock(&swap_lock);
2562                goto out_dput;
2563        }
2564        del_from_avail_list(p);
2565        spin_lock(&p->lock);
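        /*
         * If this device had an auto-assigned (negative) priority, bump the
         * priority of every lower-priority device so the auto-assigned
         * priorities stay contiguous (least_priority is adjusted to match).
         */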
2566        if (p->prio < 0) {
2567                struct swap_info_struct *si = p;
2568                int nid;
2569
2570                plist_for_each_entry_continue(si, &swap_active_head, list) {
2571                        si->prio++;
2572                        si->list.prio--;
2573                        for_each_node(nid) {
2574                                if (si->avail_lists[nid].prio != 1)
2575                                        si->avail_lists[nid].prio--;
2576                        }
2577                }
2578                least_priority++;
2579        }
2580        plist_del(&p->list, &swap_active_head);
2581        atomic_long_sub(p->pages, &nr_swap_pages);
2582        total_swap_pages -= p->pages;
2583        p->flags &= ~SWP_WRITEOK;
2584        spin_unlock(&p->lock);
2585        spin_unlock(&swap_lock);
2586
2587        disable_swap_slots_cache_lock();
2588
2589        set_current_oom_origin();
2590        err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2591        clear_current_oom_origin();
2592
2593        if (err) {
2594                /* re-insert swap space back into swap_list */
2595                reinsert_swap_info(p);
2596                reenable_swap_slots_cache_unlock();
2597                goto out_dput;
2598        }
2599
2600        reenable_swap_slots_cache_unlock();
2601
2602        spin_lock(&swap_lock);
2603        spin_lock(&p->lock);
2604        p->flags &= ~SWP_VALID;         /* mark swap device as invalid */
2605        spin_unlock(&p->lock);
2606        spin_unlock(&swap_lock);
2607        /*
2608         * wait for swap operations protected by get/put_swap_device()
2609         * to complete
2610         */
2611        synchronize_rcu();
2612
2613        flush_work(&p->discard_work);
2614
2615        destroy_swap_extents(p);
2616        if (p->flags & SWP_CONTINUED)
2617                free_swap_count_continuations(p);
2618
2619        if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2620                atomic_dec(&nr_rotate_swap);
2621
2622        mutex_lock(&swapon_mutex);
2623        spin_lock(&swap_lock);
2624        spin_lock(&p->lock);
2625        drain_mmlist();
2626
2627        /* wait for anyone still in scan_swap_map */
2628        p->highest_bit = 0;             /* cuts scans short */
2629        while (p->flags >= SWP_SCANNING) {
2630                spin_unlock(&p->lock);
2631                spin_unlock(&swap_lock);
2632                schedule_timeout_uninterruptible(1);
2633                spin_lock(&swap_lock);
2634                spin_lock(&p->lock);
2635        }
2636
2637        swap_file = p->swap_file;
2638        old_block_size = p->old_block_size;
2639        p->swap_file = NULL;
2640        p->max = 0;
2641        swap_map = p->swap_map;
2642        p->swap_map = NULL;
2643        cluster_info = p->cluster_info;
2644        p->cluster_info = NULL;
2645        frontswap_map = frontswap_map_get(p);
2646        spin_unlock(&p->lock);
2647        spin_unlock(&swap_lock);
2648        frontswap_invalidate_area(p->type);
2649        frontswap_map_set(p, NULL);
2650        mutex_unlock(&swapon_mutex);
2651        free_percpu(p->percpu_cluster);
2652        p->percpu_cluster = NULL;
2653        vfree(swap_map);
2654        kvfree(cluster_info);
2655        kvfree(frontswap_map);
2656        /* Destroy swap account information */
2657        swap_cgroup_swapoff(p->type);
2658        exit_swap_address_space(p->type);
2659
2660        inode = mapping->host;
2661        if (S_ISBLK(inode->i_mode)) {
2662                struct block_device *bdev = I_BDEV(inode);
2663
2664                set_blocksize(bdev, old_block_size);
2665                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2666        }
2667
2668        inode_lock(inode);
2669        inode->i_flags &= ~S_SWAPFILE;
2670        inode_unlock(inode);
2671        filp_close(swap_file, NULL);
2672
2673        /*
2674         * Clear the SWP_USED flag after all resources are freed so that swapon
2675         * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2676         * not hold p->lock after we cleared its SWP_WRITEOK.
2677         */
2678        spin_lock(&swap_lock);
2679        p->flags = 0;
2680        spin_unlock(&swap_lock);
2681
2682        err = 0;
2683        atomic_inc(&proc_poll_event);
2684        wake_up_interruptible(&proc_poll_wait);
2685
2686out_dput:
2687        filp_close(victim, NULL);
2688out:
2689        putname(pathname);
2690        return err;
2691}
2692
2693#ifdef CONFIG_PROC_FS
2694static __poll_t swaps_poll(struct file *file, poll_table *wait)
2695{
2696        struct seq_file *seq = file->private_data;
2697
2698        poll_wait(file, &proc_poll_wait, wait);
2699
2700        if (seq->poll_event != atomic_read(&proc_poll_event)) {
2701                seq->poll_event = atomic_read(&proc_poll_event);
2702                return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2703        }
2704
2705        return EPOLLIN | EPOLLRDNORM;
2706}
2707
2708/* iterator */
2709static void *swap_start(struct seq_file *swap, loff_t *pos)
2710{
2711        struct swap_info_struct *si;
2712        int type;
2713        loff_t l = *pos;
2714
2715        mutex_lock(&swapon_mutex);
2716
2717        if (!l)
2718                return SEQ_START_TOKEN;
2719
2720        for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2721                if (!(si->flags & SWP_USED) || !si->swap_map)
2722                        continue;
2723                if (!--l)
2724                        return si;
2725        }
2726
2727        return NULL;
2728}
2729
2730static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2731{
2732        struct swap_info_struct *si = v;
2733        int type;
2734
2735        if (v == SEQ_START_TOKEN)
2736                type = 0;
2737        else
2738                type = si->type + 1;
2739
2740        for (; (si = swap_type_to_swap_info(type)); type++) {
2741                if (!(si->flags & SWP_USED) || !si->swap_map)
2742                        continue;
2743                ++*pos;
2744                return si;
2745        }
2746
2747        return NULL;
2748}
2749
2750static void swap_stop(struct seq_file *swap, void *v)
2751{
2752        mutex_unlock(&swapon_mutex);
2753}
2754
2755static int swap_show(struct seq_file *swap, void *v)
2756{
2757        struct swap_info_struct *si = v;
2758        struct file *file;
2759        int len;
2760
2761        if (si == SEQ_START_TOKEN) {
2762                seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2763                return 0;
2764        }
2765
2766        file = si->swap_file;
2767        len = seq_file_path(swap, file, " \t\n\\");
2768        seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2769                        len < 40 ? 40 - len : 1, " ",
2770                        S_ISBLK(file_inode(file)->i_mode) ?
2771                                "partition" : "file\t",
2772                        si->pages << (PAGE_SHIFT - 10),
2773                        si->inuse_pages << (PAGE_SHIFT - 10),
2774                        si->prio);
2775        return 0;
2776}
2777
2778static const struct seq_operations swaps_op = {
2779        .start =        swap_start,
2780        .next =         swap_next,
2781        .stop =         swap_stop,
2782        .show =         swap_show
2783};
2784
2785static int swaps_open(struct inode *inode, struct file *file)
2786{
2787        struct seq_file *seq;
2788        int ret;
2789
2790        ret = seq_open(file, &swaps_op);
2791        if (ret)
2792                return ret;
2793
2794        seq = file->private_data;
2795        seq->poll_event = atomic_read(&proc_poll_event);
2796        return 0;
2797}
2798
2799static const struct file_operations proc_swaps_operations = {
2800        .open           = swaps_open,
2801        .read           = seq_read,
2802        .llseek         = seq_lseek,
2803        .release        = seq_release,
2804        .poll           = swaps_poll,
2805};
2806
2807static int __init procswaps_init(void)
2808{
2809        proc_create("swaps", 0, NULL, &proc_swaps_operations);
2810        return 0;
2811}
2812__initcall(procswaps_init);
2813#endif /* CONFIG_PROC_FS */
2814
2815#ifdef MAX_SWAPFILES_CHECK
2816static int __init max_swapfiles_check(void)
2817{
2818        MAX_SWAPFILES_CHECK();
2819        return 0;
2820}
2821late_initcall(max_swapfiles_check);
2822#endif
2823
2824static struct swap_info_struct *alloc_swap_info(void)
2825{
2826        struct swap_info_struct *p;
2827        unsigned int type;
2828        int i;
2829
2830        p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2831        if (!p)
2832                return ERR_PTR(-ENOMEM);
2833
2834        spin_lock(&swap_lock);
2835        for (type = 0; type < nr_swapfiles; type++) {
2836                if (!(swap_info[type]->flags & SWP_USED))
2837                        break;
2838        }
2839        if (type >= MAX_SWAPFILES) {
2840                spin_unlock(&swap_lock);
2841                kvfree(p);
2842                return ERR_PTR(-EPERM);
2843        }
2844        if (type >= nr_swapfiles) {
2845                p->type = type;
2846                WRITE_ONCE(swap_info[type], p);
2847                /*
2848                 * Write swap_info[type] before nr_swapfiles, in case a
2849                 * racing procfs swap_start() or swap_next() is reading them.
2850                 * (We never shrink nr_swapfiles, we never free this entry.)
2851                 */
2852                smp_wmb();
2853                WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
2854        } else {
2855                kvfree(p);
2856                p = swap_info[type];
2857                /*
2858                 * Do not memset this entry: a racing procfs swap_next()
2859                 * would be relying on p->type to remain valid.
2860                 */
2861        }
2862        p->swap_extent_root = RB_ROOT;
2863        plist_node_init(&p->list, 0);
2864        for_each_node(i)
2865                plist_node_init(&p->avail_lists[i], 0);
2866        p->flags = SWP_USED;
2867        spin_unlock(&swap_lock);
2868        spin_lock_init(&p->lock);
2869        spin_lock_init(&p->cont_lock);
2870
2871        return p;
2872}
2873
2874static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2875{
2876        int error;
2877
2878        if (S_ISBLK(inode->i_mode)) {
2879                p->bdev = bdgrab(I_BDEV(inode));
2880                error = blkdev_get(p->bdev,
2881                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2882                if (error < 0) {
2883                        p->bdev = NULL;
2884                        return error;
2885                }
2886                p->old_block_size = block_size(p->bdev);
2887                error = set_blocksize(p->bdev, PAGE_SIZE);
2888                if (error < 0)
2889                        return error;
2890                p->flags |= SWP_BLKDEV;
2891        } else if (S_ISREG(inode->i_mode)) {
2892                p->bdev = inode->i_sb->s_bdev;
2893        }
2894
2895        inode_lock(inode);
2896        if (IS_SWAPFILE(inode))
2897                return -EBUSY;
2898
2899        return 0;
2900}
2901
2902
2903/*
2904 * Find out how many pages are allowed for a single swap device. There
2905 * are two limiting factors:
2906 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2907 * 2) the number of bits in the swap pte, as defined by the different
2908 * architectures.
2909 *
2910 * In order to find the largest possible bit mask, a swap entry with
2911 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2912 * decoded to a swp_entry_t again, and finally the swap offset is
2913 * extracted.
2914 *
2915 * This will mask all the bits from the initial ~0UL mask that can't
2916 * be encoded in either the swp_entry_t or the architecture definition
2917 * of a swap pte.
2918 */
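/*
 * For example (hypothetical): if an architecture's swap pte can hold a
 * 50-bit offset, the ~0UL offset is masked down to 2^50 - 1 by the round
 * trip and this returns 2^50 pages.  The actual limit is arch-specific.
 */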
2919unsigned long generic_max_swapfile_size(void)
2920{
2921        return swp_offset(pte_to_swp_entry(
2922                        swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2923}
2924
2925/* Can be overridden by an architecture for additional checks. */
2926__weak unsigned long max_swapfile_size(void)
2927{
2928        return generic_max_swapfile_size();
2929}
2930
2931static unsigned long read_swap_header(struct swap_info_struct *p,
2932                                        union swap_header *swap_header,
2933                                        struct inode *inode)
2934{
2935        int i;
2936        unsigned long maxpages;
2937        unsigned long swapfilepages;
2938        unsigned long last_page;
2939
2940        if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2941                pr_err("Unable to find swap-space signature\n");
2942                return 0;
2943        }
2944
2945        /* swap partition endianness hack... */
2946        if (swab32(swap_header->info.version) == 1) {
2947                swab32s(&swap_header->info.version);
2948                swab32s(&swap_header->info.last_page);
2949                swab32s(&swap_header->info.nr_badpages);
2950                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2951                        return 0;
2952                for (i = 0; i < swap_header->info.nr_badpages; i++)
2953                        swab32s(&swap_header->info.badpages[i]);
2954        }
2955        /* Check the swap header's sub-version */
2956        if (swap_header->info.version != 1) {
2957                pr_warn("Unable to handle swap header version %d\n",
2958                        swap_header->info.version);
2959                return 0;
2960        }
2961
2962        p->lowest_bit  = 1;
2963        p->cluster_next = 1;
2964        p->cluster_nr = 0;
2965
2966        maxpages = max_swapfile_size();
2967        last_page = swap_header->info.last_page;
2968        if (!last_page) {
2969                pr_warn("Empty swap-file\n");
2970                return 0;
2971        }
2972        if (last_page > maxpages) {
2973                pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2974                        maxpages << (PAGE_SHIFT - 10),
2975                        last_page << (PAGE_SHIFT - 10));
2976        }
2977        if (maxpages > last_page) {
2978                maxpages = last_page + 1;
2979                /* p->max is an unsigned int: don't overflow it */
2980                if ((unsigned int)maxpages == 0)
2981                        maxpages = UINT_MAX;
2982        }
2983        p->highest_bit = maxpages - 1;
2984
2985        if (!maxpages)
2986                return 0;
2987        swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2988        if (swapfilepages && maxpages > swapfilepages) {
2989                pr_warn("Swap area shorter than signature indicates\n");
2990                return 0;
2991        }
2992        if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2993                return 0;
2994        if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2995                return 0;
2996
2997        return maxpages;
2998}
2999
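/*
 * Free clusters are handed out interleaved across SWAP_CLUSTER_COLS
 * "columns": enough columns that clusters whose cluster_info entries share
 * an L1 cache line, or that belong to the same swap address space, are not
 * allocated back to back.
 */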
3000#define SWAP_CLUSTER_INFO_COLS                                          \
3001        DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3002#define SWAP_CLUSTER_SPACE_COLS                                         \
3003        DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3004#define SWAP_CLUSTER_COLS                                               \
3005        max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3006
3007static int setup_swap_map_and_extents(struct swap_info_struct *p,
3008                                        union swap_header *swap_header,
3009                                        unsigned char *swap_map,
3010                                        struct swap_cluster_info *cluster_info,
3011                                        unsigned long maxpages,
3012                                        sector_t *span)
3013{
3014        unsigned int j, k;
3015        unsigned int nr_good_pages;
3016        int nr_extents;
3017        unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3018        unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3019        unsigned long i, idx;
3020
3021        nr_good_pages = maxpages - 1;   /* omit header page */
3022
3023        cluster_list_init(&p->free_clusters);
3024        cluster_list_init(&p->discard_clusters);
3025
3026        for (i = 0; i < swap_header->info.nr_badpages; i++) {
3027                unsigned int page_nr = swap_header->info.badpages[i];
3028                if (page_nr == 0 || page_nr > swap_header->info.last_page)
3029                        return -EINVAL;
3030                if (page_nr < maxpages) {
3031                        swap_map[page_nr] = SWAP_MAP_BAD;
3032                        nr_good_pages--;
3033                        /*
3034                         * Haven't marked the cluster free yet, no list
3035                         * operation involved
3036                         */
3037                        inc_cluster_info_page(p, cluster_info, page_nr);
3038                }
3039        }
3040
3041        /* Haven't marked the cluster free yet, no list operation involved */
3042        for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3043                inc_cluster_info_page(p, cluster_info, i);
3044
3045        if (nr_good_pages) {
3046                swap_map[0] = SWAP_MAP_BAD;
3047                /*
3048                 * Haven't marked the cluster free yet, no list
3049                 * operation involved
3050                 */
3051                inc_cluster_info_page(p, cluster_info, 0);
3052                p->max = maxpages;
3053                p->pages = nr_good_pages;
3054                nr_extents = setup_swap_extents(p, span);
3055                if (nr_extents < 0)
3056                        return nr_extents;
3057                nr_good_pages = p->pages;
3058        }
3059        if (!nr_good_pages) {
3060                pr_warn("Empty swap-file\n");
3061                return -EINVAL;
3062        }
3063
3064        if (!cluster_info)
3065                return nr_extents;
3066
3067
3068        /*
3069         * Interleave the free cluster list to reduce false sharing of
3070         * cluster_info cache lines and contention within a swap address space.
3071         */
3072        for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3073                j = (k + col) % SWAP_CLUSTER_COLS;
3074                for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3075                        idx = i * SWAP_CLUSTER_COLS + j;
3076                        if (idx >= nr_clusters)
3077                                continue;
3078                        if (cluster_count(&cluster_info[idx]))
3079                                continue;
3080                        cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3081                        cluster_list_add_tail(&p->free_clusters, cluster_info,
3082                                              idx);
3083                }
3084        }
3085        return nr_extents;
3086}
3087
3088/*
3089 * Helper for sys_swapon: determine whether a given swap
3090 * backing device's queue supports DISCARD operations.
3091 */
3092static bool swap_discardable(struct swap_info_struct *si)
3093{
3094        struct request_queue *q = bdev_get_queue(si->bdev);
3095
3096        if (!q || !blk_queue_discard(q))
3097                return false;
3098
3099        return true;
3100}
3101
3102SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3103{
3104        struct swap_info_struct *p;
3105        struct filename *name;
3106        struct file *swap_file = NULL;
3107        struct address_space *mapping;
3108        int prio;
3109        int error;
3110        union swap_header *swap_header;
3111        int nr_extents;
3112        sector_t span;
3113        unsigned long maxpages;
3114        unsigned char *swap_map = NULL;
3115        struct swap_cluster_info *cluster_info = NULL;
3116        unsigned long *frontswap_map = NULL;
3117        struct page *page = NULL;
3118        struct inode *inode = NULL;
3119        bool inced_nr_rotate_swap = false;
3120
3121        if (swap_flags & ~SWAP_FLAGS_VALID)
3122                return -EINVAL;
3123
3124        if (!capable(CAP_SYS_ADMIN))
3125                return -EPERM;
3126
3127        if (!swap_avail_heads)
3128                return -ENOMEM;
3129
3130        p = alloc_swap_info();
3131        if (IS_ERR(p))
3132                return PTR_ERR(p);
3133
3134        INIT_WORK(&p->discard_work, swap_discard_work);
3135
3136        name = getname(specialfile);
3137        if (IS_ERR(name)) {
3138                error = PTR_ERR(name);
3139                name = NULL;
3140                goto bad_swap;
3141        }
3142        swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3143        if (IS_ERR(swap_file)) {
3144                error = PTR_ERR(swap_file);
3145                swap_file = NULL;
3146                goto bad_swap;
3147        }
3148
3149        p->swap_file = swap_file;
3150        mapping = swap_file->f_mapping;
3151        inode = mapping->host;
3152
3153        /* If S_ISREG(inode->i_mode), claim_swapfile() will do inode_lock(inode); */
3154        error = claim_swapfile(p, inode);
3155        if (unlikely(error))
3156                goto bad_swap;
3157
3158        /*
3159         * Read the swap header.
3160         */
3161        if (!mapping->a_ops->readpage) {
3162                error = -EINVAL;
3163                goto bad_swap;
3164        }
3165        page = read_mapping_page(mapping, 0, swap_file);
3166        if (IS_ERR(page)) {
3167                error = PTR_ERR(page);
3168                goto bad_swap;
3169        }
3170        swap_header = kmap(page);
3171
3172        maxpages = read_swap_header(p, swap_header, inode);
3173        if (unlikely(!maxpages)) {
3174                error = -EINVAL;
3175                goto bad_swap;
3176        }
3177
3178        /* OK, set up the swap map and apply the bad block list */
3179        swap_map = vzalloc(maxpages);
3180        if (!swap_map) {
3181                error = -ENOMEM;
3182                goto bad_swap;
3183        }
3184
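            /*
             * Record backing-device capabilities: SWP_STABLE_WRITES if pages must
             * not be modified while under writeback, SWP_SYNCHRONOUS_IO if the
             * device completes I/O synchronously (so swap-in can skip the swap cache).
             */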
3185        if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
3186                p->flags |= SWP_STABLE_WRITES;
3187
3188        if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
3189                p->flags |= SWP_SYNCHRONOUS_IO;
3190
3191        if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3192                int cpu;
3193                unsigned long ci, nr_cluster;
3194
3195                p->flags |= SWP_SOLIDSTATE;
3196                /*
3197                 * select a random position to start with, to help wear
3198                 * leveling on the SSD
3199                 */
3200                p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
3201                nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3202
3203                cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3204                                        GFP_KERNEL);
3205                if (!cluster_info) {
3206                        error = -ENOMEM;
3207                        goto bad_swap;
3208                }
3209
3210                for (ci = 0; ci < nr_cluster; ci++)
3211                        spin_lock_init(&((cluster_info + ci)->lock));
3212
3213                p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3214                if (!p->percpu_cluster) {
3215                        error = -ENOMEM;
3216                        goto bad_swap;
3217                }
3218                for_each_possible_cpu(cpu) {
3219                        struct percpu_cluster *cluster;
3220                        cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3221                        cluster_set_null(&cluster->index);
3222                }
3223        } else {
3224                atomic_inc(&nr_rotate_swap);
3225                inced_nr_rotate_swap = true;
3226        }
3227
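            /* Allocate the per-entry swap_cgroup map used to record which memcg owns each swap entry. */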
3228        error = swap_cgroup_swapon(p->type, maxpages);
3229        if (error)
3230                goto bad_swap;
3231
3232        nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3233                cluster_info, maxpages, &span);
3234        if (unlikely(nr_extents < 0)) {
3235                error = nr_extents;
3236                goto bad_swap;
3237        }
3238        /* frontswap enabled? set up bit-per-page map for frontswap */
3239        if (IS_ENABLED(CONFIG_FRONTSWAP))
3240                frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3241                                         sizeof(long),
3242                                         GFP_KERNEL);
3243
3244        if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3245                /*
3246                 * When discard is enabled for swap with no particular
3247                 * policy flagged, we set all swap discard flags here in
3248                 * order to sustain backward compatibility with older
3249                 * swapon(8) releases.
3250                 */
3251                p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3252                             SWP_PAGE_DISCARD);
3253
3254                /*
3255                 * By flagging sys_swapon, a sysadmin can tell us to
3256                 * either do single-time area discards only, or to just
3257                 * perform discards for released swap page-clusters.
3258                 * Now it's time to adjust the p->flags accordingly.
3259                 */
3260                if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3261                        p->flags &= ~SWP_PAGE_DISCARD;
3262                else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3263                        p->flags &= ~SWP_AREA_DISCARD;
3264
3265                /* issue a swapon-time discard if it's still required */
3266                if (p->flags & SWP_AREA_DISCARD) {
3267                        int err = discard_swap(p);
3268                        if (unlikely(err))
3269                                pr_err("swapon: discard_swap(%p): %d\n",
3270                                        p, err);
3271                }
3272        }
3273
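            /*
             * Set up the swap cache address spaces for this swap type, one per
             * SWAP_ADDRESS_SPACE_PAGES chunk, to spread swap cache lock contention.
             */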
3274        error = init_swap_address_space(p->type, maxpages);
3275        if (error)
3276                goto bad_swap;
3277
3278        /*
3279         * Flush any pending IO and dirty mappings before we start using this
3280         * swap device.
3281         */
3282        inode->i_flags |= S_SWAPFILE;
3283        error = inode_drain_writes(inode);
3284        if (error) {
3285                inode->i_flags &= ~S_SWAPFILE;
3286                goto bad_swap;
3287        }
3288
3289        mutex_lock(&swapon_mutex);
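            /* A negative prio lets enable_swap_info() assign the next default (lowest) priority. */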
3290        prio = -1;
3291        if (swap_flags & SWAP_FLAG_PREFER)
3292                prio =
3293                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3294        enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3295
3296        pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3297                p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3298                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3299                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3300                (p->flags & SWP_DISCARDABLE) ? "D" : "",
3301                (p->flags & SWP_AREA_DISCARD) ? "s" : "",
3302                (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3303                (frontswap_map) ? "FS" : "");
3304
3305        mutex_unlock(&swapon_mutex);
3306        atomic_inc(&proc_poll_event);
3307        wake_up_interruptible(&proc_poll_wait);
3308
3309        error = 0;
3310        goto out;
3311bad_swap:
3312        free_percpu(p->percpu_cluster);
3313        p->percpu_cluster = NULL;
3314        if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3315                set_blocksize(p->bdev, p->old_block_size);
3316                blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3317        }
3318        destroy_swap_extents(p);
3319        swap_cgroup_swapoff(p->type);
3320        spin_lock(&swap_lock);
3321        p->swap_file = NULL;
3322        p->flags = 0;
3323        spin_unlock(&swap_lock);
3324        vfree(swap_map);
3325        kvfree(cluster_info);
3326        kvfree(frontswap_map);
3327        if (inced_nr_rotate_swap)
3328                atomic_dec(&nr_rotate_swap);
3329        if (swap_file) {
3330                if (inode) {
3331                        inode_unlock(inode);
3332                        inode = NULL;
3333                }
3334                filp_close(swap_file, NULL);
3335        }
3336out:
3337        if (page && !IS_ERR(page)) {
3338                kunmap(page);
3339                put_page(page);
3340        }
3341        if (name)
3342                putname(name);
3343        if (inode)
3344                inode_unlock(inode);
3345        if (!error)
3346                enable_swap_slots_cache();
3347        return error;
3348}
3349
3350void si_swapinfo(struct sysinfo *val)
3351{
3352        unsigned int type;
3353        unsigned long nr_to_be_unused = 0;
3354
3355        spin_lock(&swap_lock);
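            /*
             * Devices that are SWP_USED but no longer SWP_WRITEOK (typically
             * mid-swapoff) have had their pages removed from the global counters;
             * add their still-in-use pages back here.
             */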
3356        for (type = 0; type < nr_swapfiles; type++) {
3357                struct swap_info_struct *si = swap_info[type];
3358
3359                if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3360                        nr_to_be_unused += si->inuse_pages;
3361        }
3362        val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3363        val->totalswap = total_swap_pages + nr_to_be_unused;
3364        spin_unlock(&swap_lock);
3365}
3366
3367/*
3368 * Verify that a swap entry is valid and increment its swap map count.
3369 *
3370 * Returns 0 on success, or an error code in the following cases:
3371 * - EINVAL:  swp_entry is invalid
3372 * - EINVAL:  swp_entry is a migration entry
3373 * - EEXIST:  a swap-cache reference is requested but one already exists
3374 * - ENOENT:  a swap-cache reference is requested but the entry is unused
3375 * - ENOMEM:  a swap-mapped reference is requested but the count needs a
3376 *            swap count continuation page which is not yet allocated
3377 */
3378static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3379{
3380        struct swap_info_struct *p;
3381        struct swap_cluster_info *ci;
3382        unsigned long offset;
3383        unsigned char count;
3384        unsigned char has_cache;
3385        int err = -EINVAL;
3386
3387        p = get_swap_device(entry);
3388        if (!p)
3389                goto out;
3390
3391        offset = swp_offset(entry);
3392        ci = lock_cluster_or_swap_info(p, offset);
3393
3394        count = p->swap_map[offset];
3395
3396        /*
3397         * swapin_readahead() doesn't check if a swap entry is valid, so the
3398         * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3399         */
3400        if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3401                err = -ENOENT;
3402                goto unlock_out;
3403        }
3404
3405        has_cache = count & SWAP_HAS_CACHE;
3406        count &= ~SWAP_HAS_CACHE;
3407        err = 0;
3408
3409        if (usage == SWAP_HAS_CACHE) {
3410
3411                /* set SWAP_HAS_CACHE if there is no cache and entry is used */
3412                if (!has_cache && count)
3413                        has_cache = SWAP_HAS_CACHE;
3414                else if (has_cache)             /* someone else added cache */
3415                        err = -EEXIST;
3416                else                            /* no users remaining */
3417                        err = -ENOENT;
3418
3419        } else if (count || has_cache) {
3420
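                    /*
                     * Bump the map count; once SWAP_MAP_MAX is reached, further
                     * references are carried in a swap count continuation page.
                     */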
3421                if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3422                        count += usage;
3423                else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3424                        err = -EINVAL;
3425                else if (swap_count_continued(p, offset, count))
3426                        count = COUNT_CONTINUED;
3427                else
3428                        err = -ENOMEM;
3429        } else
3430                err = -ENOENT;                  /* unused swap entry */
3431
3432        p->swap_map[offset] = count | has_cache;
3433
3434unlock_out:
3435        unlock_cluster_or_swap_info(p, ci);
3436out:
3437        if (p)
3438                put_swap_device(p);
3439        return err;
3440}
3441
3442/*
3443 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3444 * (in which case its reference count is never incremented).
3445 */
3446void swap_shmem_alloc(swp_entry_t entry)
3447{
3448        __swap_duplicate(entry, SWAP_MAP_SHMEM);
3449}
3450
3451/*
3452 * Increase reference count of swap entry by 1.
3453 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3454 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3455 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3456 * might occur if a page table entry has got corrupted.
3457 */
3458int swap_duplicate(swp_entry_t entry)
3459{
3460        int err = 0;
3461
3462        while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3463                err = add_swap_count_continuation(entry, GFP_ATOMIC);
3464        return err;
3465}
3466
3467/*
3468 * @entry: swap entry for which we allocate swap cache.
3469 *
3470 * Called when allocating swap cache for an existing swap entry.
3471 * Returns 0 on success, or an error code; -EEXIST means a swap cache
3472 * already exists for this entry.
3473 * Note: the return codes differ from swap_duplicate().
3474 */
3475int swapcache_prepare(swp_entry_t entry)
3476{
3477        return __swap_duplicate(entry, SWAP_HAS_CACHE);
3478}
3479
3480struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3481{
3482        return swap_type_to_swap_info(swp_type(entry));
3483}
3484
3485struct swap_info_struct *page_swap_info(struct page *page)
3486{
3487        swp_entry_t entry = { .val = page_private(page) };
3488        return swp_swap_info(entry);
3489}
3490
3491/*
3492 * out-of-line __page_file_ methods to avoid include hell.
3493 */
3494struct address_space *__page_file_mapping(struct page *page)
3495{
3496        return page_swap_info(page)->swap_file->f_mapping;
3497}
3498EXPORT_SYMBOL_GPL(__page_file_mapping);
3499
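    /*
     * For a page in the swap cache, page_private() holds its swp_entry_t; the
     * swap offset is also the page's index in the swap address space.
     */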
3500pgoff_t __page_file_index(struct page *page)
3501{
3502        swp_entry_t swap = { .val = page_private(page) };
3503        return swp_offset(swap);
3504}
3505EXPORT_SYMBOL_GPL(__page_file_index);
3506
3507/*
3508 * add_swap_count_continuation - called when a swap count is duplicated
3509 * beyond SWAP_MAP_MAX: it allocates a new page and links it to the entry's
3510 * page of the original vmalloc'ed swap_map, to hold the continuation count
3511 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3512 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3513 *
3514 * These continuation pages are seldom referenced: the common paths all work
3515 * on the original swap_map, only referring to a continuation page when the
3516 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3517 *
3518 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3519 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3520 * can be called after dropping locks.
3521 */
3522int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3523{
3524        struct swap_info_struct *si;
3525        struct swap_cluster_info *ci;
3526        struct page *head;
3527        struct page *page;
3528        struct page *list_page;
3529        pgoff_t offset;
3530        unsigned char count;
3531        int ret = 0;
3532
3533        /*
3534         * When debugging, it's easier to use __GFP_ZERO here; but it's better
3535         * for latency not to zero a page while GFP_ATOMIC and holding locks.
3536         */
3537        page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3538
3539        si = get_swap_device(entry);
3540        if (!si) {
3541                /*
3542                 * An acceptable race has occurred since the failing
3543                 * __swap_duplicate(): the swap device may have been swapped off
3544                 */
3545                goto outer;
3546        }
3547        spin_lock(&si->lock);
3548
3549        offset = swp_offset(entry);
3550
3551        ci = lock_cluster(si, offset);
3552
3553        count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3554
3555        if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3556                /*
3557                 * The higher the swap count, the more likely it is that tasks
3558                 * will race to add swap count continuation: we need to avoid
3559                 * over-provisioning.
3560                 */
3561                goto out;
3562        }
3563
3564        if (!page) {
3565                ret = -ENOMEM;
3566                goto out;
3567        }
3568
3569        /*
3570         * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3571         * no architecture is using highmem pages for kernel page tables: so it
3572         * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3573         */
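            /*
             * head is the swap_map page covering this entry; continuation pages
             * hang off head->lru and hold one extra count byte per entry at the
             * same in-page offset.
             */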
3574        head = vmalloc_to_page(si->swap_map + offset);
3575        offset &= ~PAGE_MASK;
3576
3577        spin_lock(&si->cont_lock);
3578        /*
3579         * Page allocation does not initialize the page's lru field,
3580         * but it does always reset its private field.
3581         */
3582        if (!page_private(head)) {
3583                BUG_ON(count & COUNT_CONTINUED);
3584                INIT_LIST_HEAD(&head->lru);
3585                set_page_private(head, SWP_CONTINUED);
3586                si->flags |= SWP_CONTINUED;
3587        }
3588
3589        list_for_each_entry(list_page, &head->lru, lru) {
3590                unsigned char *map;
3591
3592                /*
3593                 * If the previous map said no continuation, but we've found
3594                 * a continuation page, free our allocation and use this one.
3595                 */
3596                if (!(count & COUNT_CONTINUED))
3597                        goto out_unlock_cont;
3598
3599                map = kmap_atomic(list_page) + offset;
3600                count = *map;
3601                kunmap_atomic(map);
3602
3603                /*
3604                 * If this continuation count now has some space in it,
3605                 * free our allocation and use this one.
3606                 */
3607                if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3608                        goto out_unlock_cont;
3609        }
3610
3611        list_add_tail(&page->lru, &head->lru);
3612        page = NULL;                    /* now it's attached, don't free it */
3613out_unlock_cont:
3614        spin_unlock(&si->cont_lock);
3615out:
3616        unlock_cluster(ci);
3617        spin_unlock(&si->lock);
3618        put_swap_device(si);
3619outer:
3620        if (page)
3621                __free_page(page);
3622        return ret;
3623}
3624
3625/*
3626 * swap_count_continued - when the original swap_map count is incremented
3627 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3628 * into, carry if so, or else fail until a new continuation page is allocated;
3629 * when the original swap_map count is decremented from 0 with continuation,
3630 * borrow from the continuation and report whether it still holds more.
3631 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3632 * lock.
3633 */
3634static bool swap_count_continued(struct swap_info_struct *si,
3635                                 pgoff_t offset, unsigned char count)
3636{
3637        struct page *head;
3638        struct page *page;
3639        unsigned char *map;
3640        bool ret;
3641
3642        head = vmalloc_to_page(si->swap_map + offset);
3643        if (page_private(head) != SWP_CONTINUED) {
3644                BUG_ON(count & COUNT_CONTINUED);
3645                return false;           /* need to add count continuation */
3646        }
3647
3648        spin_lock(&si->cont_lock);
3649        offset &= ~PAGE_MASK;
3650        page = list_entry(head->lru.next, struct page, lru);
3651        map = kmap_atomic(page) + offset;
3652
3653        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
3654                goto init_map;          /* jump over SWAP_CONT_MAX checks */
3655
3656        if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3657                /*
3658                 * Think of how you add 1 to 999
3659                 */
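                    /* Skip digits that are full and already continued; the carry will zero them on the way back. */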
3660                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3661                        kunmap_atomic(map);
3662                        page = list_entry(page->lru.next, struct page, lru);
3663                        BUG_ON(page == head);
3664                        map = kmap_atomic(page) + offset;
3665                }
3666                if (*map == SWAP_CONT_MAX) {
3667                        kunmap_atomic(map);
3668                        page = list_entry(page->lru.next, struct page, lru);
3669                        if (page == head) {
3670                                ret = false;    /* add count continuation */
3671                                goto out;
3672                        }
3673                        map = kmap_atomic(page) + offset;
3674init_map:               *map = 0;               /* we didn't zero the page */
3675                }
3676                *map += 1;
3677                kunmap_atomic(map);
3678                page = list_entry(page->lru.prev, struct page, lru);
3679                while (page != head) {
3680                        map = kmap_atomic(page) + offset;
3681                        *map = COUNT_CONTINUED;
3682                        kunmap_atomic(map);
3683                        page = list_entry(page->lru.prev, struct page, lru);
3684                }
3685                ret = true;                     /* incremented */
3686
3687        } else {                                /* decrementing */
3688                /*
3689                 * Think of how you subtract 1 from 1000
3690                 */
3691                BUG_ON(count != COUNT_CONTINUED);
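                    /* Skip zero digits; the borrow comes from the first non-zero digit above them. */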
3692                while (*map == COUNT_CONTINUED) {
3693                        kunmap_atomic(map);
3694                        page = list_entry(page->lru.next, struct page, lru);
3695                        BUG_ON(page == head);
3696                        map = kmap_atomic(page) + offset;
3697                }
3698                BUG_ON(*map == 0);
3699                *map -= 1;
3700                if (*map == 0)
3701                        count = 0;
3702                kunmap_atomic(map);
3703                page = list_entry(page->lru.prev, struct page, lru);
3704                while (page != head) {
3705                        map = kmap_atomic(page) + offset;
3706                        *map = SWAP_CONT_MAX | count;
3707                        count = COUNT_CONTINUED;
3708                        kunmap_atomic(map);
3709                        page = list_entry(page->lru.prev, struct page, lru);
3710                }
3711                ret = count == COUNT_CONTINUED;
3712        }
3713out:
3714        spin_unlock(&si->cont_lock);
3715        return ret;
3716}
3717
3718/*
3719 * free_swap_count_continuations - called at swapoff to free all continuation pages
3720 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3721 */
3722static void free_swap_count_continuations(struct swap_info_struct *si)
3723{
3724        pgoff_t offset;
3725
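            /*
             * swap_map entries are one byte each, so each page of the vmalloc'ed
             * map covers PAGE_SIZE entries; visit the head page of each chunk.
             */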
3726        for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3727                struct page *head;
3728                head = vmalloc_to_page(si->swap_map + offset);
3729                if (page_private(head)) {
3730                        struct page *page, *next;
3731
3732                        list_for_each_entry_safe(page, next, &head->lru, lru) {
3733                                list_del(&page->lru);
3734                                __free_page(page);
3735                        }
3736                }
3737        }
3738}
3739
3740#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3741void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
3742                                  gfp_t gfp_mask)
3743{
3744        struct swap_info_struct *si, *next;
3745        if (!(gfp_mask & __GFP_IO) || !memcg)
3746                return;
3747
3748        if (!blk_cgroup_congested())
3749                return;
3750
3751        /*
3752         * We've already scheduled a throttle, avoid taking the global swap
3753         * lock.
3754         */
3755        if (current->throttle_queue)
3756                return;
3757
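            /*
             * Schedule throttling against the first swap device on this node's
             * avail list that is backed by a block device.
             */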
3758        spin_lock(&swap_avail_lock);
3759        plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
3760                                  avail_lists[node]) {
3761                if (si->bdev) {
3762                        blkcg_schedule_throttle(bdev_get_queue(si->bdev),
3763                                                true);
3764                        break;
3765                }
3766        }
3767        spin_unlock(&swap_avail_lock);
3768}
3769#endif
3770
3771static int __init swapfile_init(void)
3772{
3773        int nid;
3774
3775        swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3776                                         GFP_KERNEL);
3777        if (!swap_avail_heads) {
3778                pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3779                return -ENOMEM;
3780        }
3781
3782        for_each_node(nid)
3783                plist_head_init(&swap_avail_heads[nid]);
3784
3785        return 0;
3786}
3787subsys_initcall(swapfile_init);
3788