linux/mm/swapfile.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/mm.h>
  10#include <linux/sched/mm.h>
  11#include <linux/sched/task.h>
  12#include <linux/hugetlb.h>
  13#include <linux/mman.h>
  14#include <linux/slab.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/swap.h>
  17#include <linux/vmalloc.h>
  18#include <linux/pagemap.h>
  19#include <linux/namei.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/blkdev.h>
  22#include <linux/random.h>
  23#include <linux/writeback.h>
  24#include <linux/proc_fs.h>
  25#include <linux/seq_file.h>
  26#include <linux/init.h>
  27#include <linux/ksm.h>
  28#include <linux/rmap.h>
  29#include <linux/security.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mutex.h>
  32#include <linux/capability.h>
  33#include <linux/syscalls.h>
  34#include <linux/memcontrol.h>
  35#include <linux/poll.h>
  36#include <linux/oom.h>
  37#include <linux/frontswap.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/swap_slots.h>
  41#include <linux/sort.h>
  42#include <linux/completion.h>
  43
  44#include <asm/tlbflush.h>
  45#include <linux/swapops.h>
  46#include <linux/swap_cgroup.h>
  47
  48static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  49                                 unsigned char);
  50static void free_swap_count_continuations(struct swap_info_struct *);
  51
  52DEFINE_SPINLOCK(swap_lock);
  53static unsigned int nr_swapfiles;
  54atomic_long_t nr_swap_pages;
  55/*
  56 * Some modules use swappable objects and may try to swap them out under
  57 * memory pressure (via the shrinker). Before doing so, they may wish to
  58 * check to see if any swap space is available.
  59 */
  60EXPORT_SYMBOL_GPL(nr_swap_pages);
   61/* Protected with swap_lock. Reading in vm_swap_full() doesn't need the lock. */
  62long total_swap_pages;
  63static int least_priority = -1;
  64
  65static const char Bad_file[] = "Bad swap file entry ";
  66static const char Unused_file[] = "Unused swap file entry ";
  67static const char Bad_offset[] = "Bad swap offset entry ";
  68static const char Unused_offset[] = "Unused swap offset entry ";
  69
  70/*
  71 * all active swap_info_structs
  72 * protected with swap_lock, and ordered by priority.
  73 */
  74PLIST_HEAD(swap_active_head);
  75
  76/*
  77 * all available (active, not full) swap_info_structs
  78 * protected with swap_avail_lock, ordered by priority.
  79 * This is used by get_swap_page() instead of swap_active_head
  80 * because swap_active_head includes all swap_info_structs,
  81 * but get_swap_page() doesn't need to look at full ones.
  82 * This uses its own lock instead of swap_lock because when a
  83 * swap_info_struct changes between not-full/full, it needs to
  84 * add/remove itself to/from this list, but the swap_info_struct->lock
  85 * is held and the locking order requires swap_lock to be taken
  86 * before any swap_info_struct->lock.
  87 */
  88static struct plist_head *swap_avail_heads;
  89static DEFINE_SPINLOCK(swap_avail_lock);
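
/*
 * A rough sketch of the resulting lock nesting when a swap device flips
 * between full and not-full (see del_from_avail_list() and
 * add_to_avail_list() below): swap_avail_lock may be taken while si->lock
 * is already held, which would be illegal for swap_lock.  Locals here are
 * illustrative only.
 *
 *	spin_lock(&si->lock);
 *	...
 *	spin_lock(&swap_avail_lock);
 *	plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
 *	spin_unlock(&swap_avail_lock);
 *	...
 *	spin_unlock(&si->lock);
 */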
  90
  91struct swap_info_struct *swap_info[MAX_SWAPFILES];
  92
  93static DEFINE_MUTEX(swapon_mutex);
  94
  95static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  96/* Activity counter to indicate that a swapon or swapoff has occurred */
  97static atomic_t proc_poll_event = ATOMIC_INIT(0);
  98
  99atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 100
 101static struct swap_info_struct *swap_type_to_swap_info(int type)
 102{
 103        if (type >= MAX_SWAPFILES)
 104                return NULL;
 105
 106        return READ_ONCE(swap_info[type]); /* rcu_dereference() */
 107}
 108
 109static inline unsigned char swap_count(unsigned char ent)
 110{
 111        return ent & ~SWAP_HAS_CACHE;   /* may include COUNT_CONTINUED flag */
 112}
 113
 114/* Reclaim the swap entry anyway if possible */
 115#define TTRS_ANYWAY             0x1
 116/*
 117 * Reclaim the swap entry if there are no more mappings of the
 118 * corresponding page
 119 */
 120#define TTRS_UNMAPPED           0x2
  121/* Reclaim the swap entry if swap is getting full */
 122#define TTRS_FULL               0x4
 123
 124/* returns 1 if swap entry is freed */
 125static int __try_to_reclaim_swap(struct swap_info_struct *si,
 126                                 unsigned long offset, unsigned long flags)
 127{
 128        swp_entry_t entry = swp_entry(si->type, offset);
 129        struct page *page;
 130        int ret = 0;
 131
 132        page = find_get_page(swap_address_space(entry), offset);
 133        if (!page)
 134                return 0;
 135        /*
  136         * When this function is called from scan_swap_map_slots(), it is
  137         * being called by vmscan.c while reclaiming pages, so a page lock
  138         * is already held.  We have to use trylock here to avoid deadlock.
  139         * This is a special case; in normal operation, use
  140         * try_to_free_swap() with an explicit lock_page().
 141         */
 142        if (trylock_page(page)) {
 143                if ((flags & TTRS_ANYWAY) ||
 144                    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
 145                    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
 146                        ret = try_to_free_swap(page);
 147                unlock_page(page);
 148        }
 149        put_page(page);
 150        return ret;
 151}
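
/*
 * For illustration, the TTRS_* flags form a reclaim policy mask.  A caller
 * that must reclaim unconditionally (as scan_swap_map_slots() does below
 * for a cache-only entry) passes TTRS_ANYWAY, while an opportunistic
 * caller can combine the conditional flags.  The local "freed" is
 * illustrative only:
 *
 *	freed = __try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL);
 */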
 152
 153static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 154{
 155        struct rb_node *rb = rb_first(&sis->swap_extent_root);
 156        return rb_entry(rb, struct swap_extent, rb_node);
 157}
 158
 159static inline struct swap_extent *next_se(struct swap_extent *se)
 160{
 161        struct rb_node *rb = rb_next(&se->rb_node);
 162        return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 163}
 164
 165/*
  166 * swapon tells the device that all the old swap contents can be
  167 * discarded, to allow the swap device to optimize its wear-levelling.
 168 */
 169static int discard_swap(struct swap_info_struct *si)
 170{
 171        struct swap_extent *se;
 172        sector_t start_block;
 173        sector_t nr_blocks;
 174        int err = 0;
 175
 176        /* Do not discard the swap header page! */
 177        se = first_se(si);
 178        start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 179        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 180        if (nr_blocks) {
 181                err = blkdev_issue_discard(si->bdev, start_block,
 182                                nr_blocks, GFP_KERNEL, 0);
 183                if (err)
 184                        return err;
 185                cond_resched();
 186        }
 187
 188        for (se = next_se(se); se; se = next_se(se)) {
 189                start_block = se->start_block << (PAGE_SHIFT - 9);
 190                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 191
 192                err = blkdev_issue_discard(si->bdev, start_block,
 193                                nr_blocks, GFP_KERNEL, 0);
 194                if (err)
 195                        break;
 196
 197                cond_resched();
 198        }
 199        return err;             /* That will often be -EOPNOTSUPP */
 200}
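
/*
 * The "<< (PAGE_SHIFT - 9)" conversions above turn page numbers into
 * 512-byte sector numbers.  As a worked example with 4KiB pages:
 * PAGE_SHIFT - 9 = 12 - 9 = 3, so swap page N begins at sector N * 8 and
 * spans 8 sectors.
 */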
 201
 202static struct swap_extent *
 203offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 204{
 205        struct swap_extent *se;
 206        struct rb_node *rb;
 207
 208        rb = sis->swap_extent_root.rb_node;
 209        while (rb) {
 210                se = rb_entry(rb, struct swap_extent, rb_node);
 211                if (offset < se->start_page)
 212                        rb = rb->rb_left;
 213                else if (offset >= se->start_page + se->nr_pages)
 214                        rb = rb->rb_right;
 215                else
 216                        return se;
 217        }
 218        /* It *must* be present */
 219        BUG();
 220}
 221
 222sector_t swap_page_sector(struct page *page)
 223{
 224        struct swap_info_struct *sis = page_swap_info(page);
 225        struct swap_extent *se;
 226        sector_t sector;
 227        pgoff_t offset;
 228
 229        offset = __page_file_index(page);
 230        se = offset_to_swap_extent(sis, offset);
 231        sector = se->start_block + (offset - se->start_page);
 232        return sector << (PAGE_SHIFT - 9);
 233}
 234
 235/*
  236 * swap allocation tells the device that a cluster of swap can now be
  237 * discarded, to allow the swap device to optimize its wear-levelling.
 238 */
 239static void discard_swap_cluster(struct swap_info_struct *si,
 240                                 pgoff_t start_page, pgoff_t nr_pages)
 241{
 242        struct swap_extent *se = offset_to_swap_extent(si, start_page);
 243
 244        while (nr_pages) {
 245                pgoff_t offset = start_page - se->start_page;
 246                sector_t start_block = se->start_block + offset;
 247                sector_t nr_blocks = se->nr_pages - offset;
 248
 249                if (nr_blocks > nr_pages)
 250                        nr_blocks = nr_pages;
 251                start_page += nr_blocks;
 252                nr_pages -= nr_blocks;
 253
 254                start_block <<= PAGE_SHIFT - 9;
 255                nr_blocks <<= PAGE_SHIFT - 9;
 256                if (blkdev_issue_discard(si->bdev, start_block,
 257                                        nr_blocks, GFP_NOIO, 0))
 258                        break;
 259
 260                se = next_se(se);
 261        }
 262}
 263
 264#ifdef CONFIG_THP_SWAP
 265#define SWAPFILE_CLUSTER        HPAGE_PMD_NR
 266
 267#define swap_entry_size(size)   (size)
 268#else
 269#define SWAPFILE_CLUSTER        256
 270
 271/*
  272 * Define swap_entry_size() as a constant to let the compiler optimize
  273 * out some code if !CONFIG_THP_SWAP
 274 */
 275#define swap_entry_size(size)   1
 276#endif
 277#define LATENCY_LIMIT           256
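
/*
 * A minimal sketch of why the constant definition helps: with
 * !CONFIG_THP_SWAP, a test such as
 *
 *	if (swap_entry_size(thp_nr_pages(page)) == SWAPFILE_CLUSTER)
 *
 * becomes "if (1 == 256)", which the compiler folds to false, so the
 * whole huge-cluster branch is dropped as dead code.
 */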
 278
 279static inline void cluster_set_flag(struct swap_cluster_info *info,
 280        unsigned int flag)
 281{
 282        info->flags = flag;
 283}
 284
 285static inline unsigned int cluster_count(struct swap_cluster_info *info)
 286{
 287        return info->data;
 288}
 289
 290static inline void cluster_set_count(struct swap_cluster_info *info,
 291                                     unsigned int c)
 292{
 293        info->data = c;
 294}
 295
 296static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 297                                         unsigned int c, unsigned int f)
 298{
 299        info->flags = f;
 300        info->data = c;
 301}
 302
 303static inline unsigned int cluster_next(struct swap_cluster_info *info)
 304{
 305        return info->data;
 306}
 307
 308static inline void cluster_set_next(struct swap_cluster_info *info,
 309                                    unsigned int n)
 310{
 311        info->data = n;
 312}
 313
 314static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 315                                         unsigned int n, unsigned int f)
 316{
 317        info->flags = f;
 318        info->data = n;
 319}
 320
 321static inline bool cluster_is_free(struct swap_cluster_info *info)
 322{
 323        return info->flags & CLUSTER_FLAG_FREE;
 324}
 325
 326static inline bool cluster_is_null(struct swap_cluster_info *info)
 327{
 328        return info->flags & CLUSTER_FLAG_NEXT_NULL;
 329}
 330
 331static inline void cluster_set_null(struct swap_cluster_info *info)
 332{
 333        info->flags = CLUSTER_FLAG_NEXT_NULL;
 334        info->data = 0;
 335}
 336
 337static inline bool cluster_is_huge(struct swap_cluster_info *info)
 338{
 339        if (IS_ENABLED(CONFIG_THP_SWAP))
 340                return info->flags & CLUSTER_FLAG_HUGE;
 341        return false;
 342}
 343
 344static inline void cluster_clear_huge(struct swap_cluster_info *info)
 345{
 346        info->flags &= ~CLUSTER_FLAG_HUGE;
 347}
 348
 349static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 350                                                     unsigned long offset)
 351{
 352        struct swap_cluster_info *ci;
 353
 354        ci = si->cluster_info;
 355        if (ci) {
 356                ci += offset / SWAPFILE_CLUSTER;
 357                spin_lock(&ci->lock);
 358        }
 359        return ci;
 360}
 361
 362static inline void unlock_cluster(struct swap_cluster_info *ci)
 363{
 364        if (ci)
 365                spin_unlock(&ci->lock);
 366}
 367
 368/*
 369 * Determine the locking method in use for this device.  Return
 370 * swap_cluster_info if SSD-style cluster-based locking is in place.
 371 */
 372static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 373                struct swap_info_struct *si, unsigned long offset)
 374{
 375        struct swap_cluster_info *ci;
 376
 377        /* Try to use fine-grained SSD-style locking if available: */
 378        ci = lock_cluster(si, offset);
 379        /* Otherwise, fall back to traditional, coarse locking: */
 380        if (!ci)
 381                spin_lock(&si->lock);
 382
 383        return ci;
 384}
 385
 386static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 387                                               struct swap_cluster_info *ci)
 388{
 389        if (ci)
 390                unlock_cluster(ci);
 391        else
 392                spin_unlock(&si->lock);
 393}
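
/*
 * The typical usage pattern, as seen throughout this file, brackets any
 * access to si->swap_map[offset] with this pair; whether the per-cluster
 * lock or si->lock is taken is hidden from the caller (locals are
 * illustrative):
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	count = swap_count(si->swap_map[offset]);
 *	unlock_cluster_or_swap_info(si, ci);
 */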
 394
 395static inline bool cluster_list_empty(struct swap_cluster_list *list)
 396{
 397        return cluster_is_null(&list->head);
 398}
 399
 400static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 401{
 402        return cluster_next(&list->head);
 403}
 404
 405static void cluster_list_init(struct swap_cluster_list *list)
 406{
 407        cluster_set_null(&list->head);
 408        cluster_set_null(&list->tail);
 409}
 410
 411static void cluster_list_add_tail(struct swap_cluster_list *list,
 412                                  struct swap_cluster_info *ci,
 413                                  unsigned int idx)
 414{
 415        if (cluster_list_empty(list)) {
 416                cluster_set_next_flag(&list->head, idx, 0);
 417                cluster_set_next_flag(&list->tail, idx, 0);
 418        } else {
 419                struct swap_cluster_info *ci_tail;
 420                unsigned int tail = cluster_next(&list->tail);
 421
 422                /*
 423                 * Nested cluster lock, but both cluster locks are
  424                 * only acquired while holding swap_info_struct->lock
 425                 */
 426                ci_tail = ci + tail;
 427                spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 428                cluster_set_next(ci_tail, idx);
 429                spin_unlock(&ci_tail->lock);
 430                cluster_set_next_flag(&list->tail, idx, 0);
 431        }
 432}
 433
 434static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 435                                           struct swap_cluster_info *ci)
 436{
 437        unsigned int idx;
 438
 439        idx = cluster_next(&list->head);
 440        if (cluster_next(&list->tail) == idx) {
 441                cluster_set_null(&list->head);
 442                cluster_set_null(&list->tail);
 443        } else
 444                cluster_set_next_flag(&list->head,
 445                                      cluster_next(&ci[idx]), 0);
 446
 447        return idx;
 448}
 449
 450/* Add a cluster to discard list and schedule it to do discard */
 451static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 452                unsigned int idx)
 453{
 454        /*
 455         * If scan_swap_map_slots() can't find a free cluster, it will check
  456         * si->swap_map directly. To make sure the cluster being discarded
  457         * isn't taken by scan_swap_map_slots(), mark its swap entries bad
  458         * (occupied). They will be cleared after the discard completes.
 459         */
 460        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 461                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 462
 463        cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 464
 465        schedule_work(&si->discard_work);
 466}
 467
 468static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 469{
 470        struct swap_cluster_info *ci = si->cluster_info;
 471
 472        cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 473        cluster_list_add_tail(&si->free_clusters, ci, idx);
 474}
 475
  476/*
  477 * Actually do the discards. After a cluster discard is finished, the
  478 * cluster will be added to the free cluster list. Caller must hold si->lock.
  479 */
 480static void swap_do_scheduled_discard(struct swap_info_struct *si)
 481{
 482        struct swap_cluster_info *info, *ci;
 483        unsigned int idx;
 484
 485        info = si->cluster_info;
 486
 487        while (!cluster_list_empty(&si->discard_clusters)) {
 488                idx = cluster_list_del_first(&si->discard_clusters, info);
 489                spin_unlock(&si->lock);
 490
 491                discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 492                                SWAPFILE_CLUSTER);
 493
 494                spin_lock(&si->lock);
 495                ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 496                __free_cluster(si, idx);
 497                memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 498                                0, SWAPFILE_CLUSTER);
 499                unlock_cluster(ci);
 500        }
 501}
 502
 503static void swap_discard_work(struct work_struct *work)
 504{
 505        struct swap_info_struct *si;
 506
 507        si = container_of(work, struct swap_info_struct, discard_work);
 508
 509        spin_lock(&si->lock);
 510        swap_do_scheduled_discard(si);
 511        spin_unlock(&si->lock);
 512}
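
/*
 * Putting the pieces above together, the deferred-discard path for a
 * cluster (triggered from free_cluster() below) roughly follows this chain:
 *
 *	free_cluster(si, idx)
 *	  -> swap_cluster_schedule_discard()	marks the entries SWAP_MAP_BAD,
 *						queues idx on si->discard_clusters
 *	  -> swap_discard_work()		runs later via schedule_work()
 *	       -> swap_do_scheduled_discard()	issues the discard, clears the
 *						swap_map bytes, __free_cluster()
 */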
 513
 514static void swap_users_ref_free(struct percpu_ref *ref)
 515{
 516        struct swap_info_struct *si;
 517
 518        si = container_of(ref, struct swap_info_struct, users);
 519        complete(&si->comp);
 520}
 521
 522static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 523{
 524        struct swap_cluster_info *ci = si->cluster_info;
 525
 526        VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 527        cluster_list_del_first(&si->free_clusters, ci);
 528        cluster_set_count_flag(ci + idx, 0, 0);
 529}
 530
 531static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 532{
 533        struct swap_cluster_info *ci = si->cluster_info + idx;
 534
 535        VM_BUG_ON(cluster_count(ci) != 0);
 536        /*
  537         * If the swap is discardable, schedule a discard of the cluster
  538         * instead of freeing it immediately. The cluster will be freed
  539         * after the discard completes.
 540         */
 541        if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 542            (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 543                swap_cluster_schedule_discard(si, idx);
 544                return;
 545        }
 546
 547        __free_cluster(si, idx);
 548}
 549
 550/*
 551 * The cluster corresponding to page_nr will be used. The cluster will be
  552 * removed from the free cluster list and its usage counter will be increased.
 553 */
 554static void inc_cluster_info_page(struct swap_info_struct *p,
 555        struct swap_cluster_info *cluster_info, unsigned long page_nr)
 556{
 557        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 558
 559        if (!cluster_info)
 560                return;
 561        if (cluster_is_free(&cluster_info[idx]))
 562                alloc_cluster(p, idx);
 563
 564        VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 565        cluster_set_count(&cluster_info[idx],
 566                cluster_count(&cluster_info[idx]) + 1);
 567}
 568
 569/*
  570 * The cluster corresponding to page_nr has its usage counter decreased by
  571 * one. If the counter becomes 0, meaning no page in the cluster is in use,
  572 * we can optionally discard the cluster and add it to the free cluster list.
 573 */
 574static void dec_cluster_info_page(struct swap_info_struct *p,
 575        struct swap_cluster_info *cluster_info, unsigned long page_nr)
 576{
 577        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 578
 579        if (!cluster_info)
 580                return;
 581
 582        VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 583        cluster_set_count(&cluster_info[idx],
 584                cluster_count(&cluster_info[idx]) - 1);
 585
 586        if (cluster_count(&cluster_info[idx]) == 0)
 587                free_cluster(p, idx);
 588}
 589
 590/*
  591 * It's possible for scan_swap_map_slots() to use a free cluster in the middle
  592 * of the free cluster list. Avoid such abuse to prevent list corruption.
 593 */
 594static bool
 595scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 596        unsigned long offset)
 597{
 598        struct percpu_cluster *percpu_cluster;
 599        bool conflict;
 600
 601        offset /= SWAPFILE_CLUSTER;
 602        conflict = !cluster_list_empty(&si->free_clusters) &&
 603                offset != cluster_list_first(&si->free_clusters) &&
 604                cluster_is_free(&si->cluster_info[offset]);
 605
 606        if (!conflict)
 607                return false;
 608
 609        percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 610        cluster_set_null(&percpu_cluster->index);
 611        return true;
 612}
 613
 614/*
  615 * Try to get a swap entry from the current CPU's swap entry pool (a cluster).
  616 * This might involve allocating a new cluster for the current CPU too.
 617 */
 618static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 619        unsigned long *offset, unsigned long *scan_base)
 620{
 621        struct percpu_cluster *cluster;
 622        struct swap_cluster_info *ci;
 623        unsigned long tmp, max;
 624
 625new_cluster:
 626        cluster = this_cpu_ptr(si->percpu_cluster);
 627        if (cluster_is_null(&cluster->index)) {
 628                if (!cluster_list_empty(&si->free_clusters)) {
 629                        cluster->index = si->free_clusters.head;
 630                        cluster->next = cluster_next(&cluster->index) *
 631                                        SWAPFILE_CLUSTER;
 632                } else if (!cluster_list_empty(&si->discard_clusters)) {
 633                        /*
  634                         * We don't have a free cluster but have some clusters
  635                         * being discarded; do the discard now and reclaim them,
  636                         * then reread cluster_next_cpu since we dropped si->lock.
 637                         */
 638                        swap_do_scheduled_discard(si);
 639                        *scan_base = this_cpu_read(*si->cluster_next_cpu);
 640                        *offset = *scan_base;
 641                        goto new_cluster;
 642                } else
 643                        return false;
 644        }
 645
 646        /*
  647         * Other CPUs can use our cluster if they can't find a free cluster;
  648         * check if there is still a free entry in the cluster.
 649         */
 650        tmp = cluster->next;
 651        max = min_t(unsigned long, si->max,
 652                    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 653        if (tmp < max) {
 654                ci = lock_cluster(si, tmp);
 655                while (tmp < max) {
 656                        if (!si->swap_map[tmp])
 657                                break;
 658                        tmp++;
 659                }
 660                unlock_cluster(ci);
 661        }
 662        if (tmp >= max) {
 663                cluster_set_null(&cluster->index);
 664                goto new_cluster;
 665        }
 666        cluster->next = tmp + 1;
 667        *offset = tmp;
 668        *scan_base = tmp;
 669        return true;
 670}
 671
 672static void __del_from_avail_list(struct swap_info_struct *p)
 673{
 674        int nid;
 675
 676        for_each_node(nid)
 677                plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 678}
 679
 680static void del_from_avail_list(struct swap_info_struct *p)
 681{
 682        spin_lock(&swap_avail_lock);
 683        __del_from_avail_list(p);
 684        spin_unlock(&swap_avail_lock);
 685}
 686
 687static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 688                             unsigned int nr_entries)
 689{
 690        unsigned int end = offset + nr_entries - 1;
 691
 692        if (offset == si->lowest_bit)
 693                si->lowest_bit += nr_entries;
 694        if (end == si->highest_bit)
 695                WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
 696        si->inuse_pages += nr_entries;
 697        if (si->inuse_pages == si->pages) {
 698                si->lowest_bit = si->max;
 699                si->highest_bit = 0;
 700                del_from_avail_list(si);
 701        }
 702}
 703
 704static void add_to_avail_list(struct swap_info_struct *p)
 705{
 706        int nid;
 707
 708        spin_lock(&swap_avail_lock);
 709        for_each_node(nid) {
 710                WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 711                plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 712        }
 713        spin_unlock(&swap_avail_lock);
 714}
 715
 716static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 717                            unsigned int nr_entries)
 718{
 719        unsigned long begin = offset;
 720        unsigned long end = offset + nr_entries - 1;
 721        void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 722
 723        if (offset < si->lowest_bit)
 724                si->lowest_bit = offset;
 725        if (end > si->highest_bit) {
 726                bool was_full = !si->highest_bit;
 727
 728                WRITE_ONCE(si->highest_bit, end);
 729                if (was_full && (si->flags & SWP_WRITEOK))
 730                        add_to_avail_list(si);
 731        }
 732        atomic_long_add(nr_entries, &nr_swap_pages);
 733        si->inuse_pages -= nr_entries;
 734        if (si->flags & SWP_BLKDEV)
 735                swap_slot_free_notify =
 736                        si->bdev->bd_disk->fops->swap_slot_free_notify;
 737        else
 738                swap_slot_free_notify = NULL;
 739        while (offset <= end) {
 740                arch_swap_invalidate_page(si->type, offset);
 741                frontswap_invalidate_page(si->type, offset);
 742                if (swap_slot_free_notify)
 743                        swap_slot_free_notify(si->bdev, offset);
 744                offset++;
 745        }
 746        clear_shadow_from_swap_cache(si->type, begin, end);
 747}
 748
 749static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 750{
 751        unsigned long prev;
 752
 753        if (!(si->flags & SWP_SOLIDSTATE)) {
 754                si->cluster_next = next;
 755                return;
 756        }
 757
 758        prev = this_cpu_read(*si->cluster_next_cpu);
 759        /*
  760         * When crossing a swap-address-space-sized aligned chunk, choose
  761         * another chunk randomly to avoid lock contention on the swap
  762         * address space if possible.
 763         */
 764        if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
 765            (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
 766                /* No free swap slots available */
 767                if (si->highest_bit <= si->lowest_bit)
 768                        return;
 769                next = si->lowest_bit +
 770                        prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
 771                next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 772                next = max_t(unsigned int, next, si->lowest_bit);
 773        }
 774        this_cpu_write(*si->cluster_next_cpu, next);
 775}
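
/*
 * A worked example of the chunk arithmetic above, assuming the usual
 * SWAP_ADDRESS_SPACE_SHIFT of 14: each swap address space covers
 * 1 << 14 = 16384 slots, i.e. 64 MiB with 4 KiB pages.  Only when the
 * percpu next pointer would leave its current 16384-slot chunk do we pick
 * a random restart point within the usable range, aligned down to a chunk
 * boundary and clamped to lowest_bit.
 */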
 776
 777static int scan_swap_map_slots(struct swap_info_struct *si,
 778                               unsigned char usage, int nr,
 779                               swp_entry_t slots[])
 780{
 781        struct swap_cluster_info *ci;
 782        unsigned long offset;
 783        unsigned long scan_base;
 784        unsigned long last_in_cluster = 0;
 785        int latency_ration = LATENCY_LIMIT;
 786        int n_ret = 0;
 787        bool scanned_many = false;
 788
 789        /*
 790         * We try to cluster swap pages by allocating them sequentially
 791         * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 792         * way, however, we resort to first-free allocation, starting
 793         * a new cluster.  This prevents us from scattering swap pages
 794         * all over the entire swap partition, so that we reduce
 795         * overall disk seek times between swap pages.  -- sct
 796         * But we do now try to find an empty cluster.  -Andrea
 797         * And we let swap pages go all over an SSD partition.  Hugh
 798         */
 799
 800        si->flags += SWP_SCANNING;
 801        /*
 802         * Use percpu scan base for SSD to reduce lock contention on
 803         * cluster and swap cache.  For HDD, sequential access is more
 804         * important.
 805         */
 806        if (si->flags & SWP_SOLIDSTATE)
 807                scan_base = this_cpu_read(*si->cluster_next_cpu);
 808        else
 809                scan_base = si->cluster_next;
 810        offset = scan_base;
 811
 812        /* SSD algorithm */
 813        if (si->cluster_info) {
 814                if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 815                        goto scan;
 816        } else if (unlikely(!si->cluster_nr--)) {
 817                if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 818                        si->cluster_nr = SWAPFILE_CLUSTER - 1;
 819                        goto checks;
 820                }
 821
 822                spin_unlock(&si->lock);
 823
 824                /*
 825                 * If seek is expensive, start searching for new cluster from
 826                 * start of partition, to minimize the span of allocated swap.
 827                 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 828                 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 829                 */
 830                scan_base = offset = si->lowest_bit;
 831                last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 832
 833                /* Locate the first empty (unaligned) cluster */
 834                for (; last_in_cluster <= si->highest_bit; offset++) {
 835                        if (si->swap_map[offset])
 836                                last_in_cluster = offset + SWAPFILE_CLUSTER;
 837                        else if (offset == last_in_cluster) {
 838                                spin_lock(&si->lock);
 839                                offset -= SWAPFILE_CLUSTER - 1;
 840                                si->cluster_next = offset;
 841                                si->cluster_nr = SWAPFILE_CLUSTER - 1;
 842                                goto checks;
 843                        }
 844                        if (unlikely(--latency_ration < 0)) {
 845                                cond_resched();
 846                                latency_ration = LATENCY_LIMIT;
 847                        }
 848                }
 849
 850                offset = scan_base;
 851                spin_lock(&si->lock);
 852                si->cluster_nr = SWAPFILE_CLUSTER - 1;
 853        }
 854
 855checks:
 856        if (si->cluster_info) {
 857                while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
  858                        /* take a break if we already got some slots */
 859                        if (n_ret)
 860                                goto done;
 861                        if (!scan_swap_map_try_ssd_cluster(si, &offset,
 862                                                        &scan_base))
 863                                goto scan;
 864                }
 865        }
 866        if (!(si->flags & SWP_WRITEOK))
 867                goto no_page;
 868        if (!si->highest_bit)
 869                goto no_page;
 870        if (offset > si->highest_bit)
 871                scan_base = offset = si->lowest_bit;
 872
 873        ci = lock_cluster(si, offset);
 874        /* reuse swap entry of cache-only swap if not busy. */
 875        if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 876                int swap_was_freed;
 877                unlock_cluster(ci);
 878                spin_unlock(&si->lock);
 879                swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 880                spin_lock(&si->lock);
 881                /* entry was freed successfully, try to use this again */
 882                if (swap_was_freed)
 883                        goto checks;
 884                goto scan; /* check next one */
 885        }
 886
 887        if (si->swap_map[offset]) {
 888                unlock_cluster(ci);
 889                if (!n_ret)
 890                        goto scan;
 891                else
 892                        goto done;
 893        }
 894        WRITE_ONCE(si->swap_map[offset], usage);
 895        inc_cluster_info_page(si, si->cluster_info, offset);
 896        unlock_cluster(ci);
 897
 898        swap_range_alloc(si, offset, 1);
 899        slots[n_ret++] = swp_entry(si->type, offset);
 900
  901        /* got enough slots or reached max slots? */
 902        if ((n_ret == nr) || (offset >= si->highest_bit))
 903                goto done;
 904
 905        /* search for next available slot */
 906
 907        /* time to take a break? */
 908        if (unlikely(--latency_ration < 0)) {
 909                if (n_ret)
 910                        goto done;
 911                spin_unlock(&si->lock);
 912                cond_resched();
 913                spin_lock(&si->lock);
 914                latency_ration = LATENCY_LIMIT;
 915        }
 916
 917        /* try to get more slots in cluster */
 918        if (si->cluster_info) {
 919                if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 920                        goto checks;
 921        } else if (si->cluster_nr && !si->swap_map[++offset]) {
 922                /* non-ssd case, still more slots in cluster? */
 923                --si->cluster_nr;
 924                goto checks;
 925        }
 926
 927        /*
  928         * Even if there are no free clusters available (fragmented),
 929         * try to scan a little more quickly with lock held unless we
 930         * have scanned too many slots already.
 931         */
 932        if (!scanned_many) {
 933                unsigned long scan_limit;
 934
 935                if (offset < scan_base)
 936                        scan_limit = scan_base;
 937                else
 938                        scan_limit = si->highest_bit;
 939                for (; offset <= scan_limit && --latency_ration > 0;
 940                     offset++) {
 941                        if (!si->swap_map[offset])
 942                                goto checks;
 943                }
 944        }
 945
 946done:
 947        set_cluster_next(si, offset + 1);
 948        si->flags -= SWP_SCANNING;
 949        return n_ret;
 950
 951scan:
 952        spin_unlock(&si->lock);
 953        while (++offset <= READ_ONCE(si->highest_bit)) {
 954                if (data_race(!si->swap_map[offset])) {
 955                        spin_lock(&si->lock);
 956                        goto checks;
 957                }
 958                if (vm_swap_full() &&
 959                    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 960                        spin_lock(&si->lock);
 961                        goto checks;
 962                }
 963                if (unlikely(--latency_ration < 0)) {
 964                        cond_resched();
 965                        latency_ration = LATENCY_LIMIT;
 966                        scanned_many = true;
 967                }
 968        }
 969        offset = si->lowest_bit;
 970        while (offset < scan_base) {
 971                if (data_race(!si->swap_map[offset])) {
 972                        spin_lock(&si->lock);
 973                        goto checks;
 974                }
 975                if (vm_swap_full() &&
 976                    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 977                        spin_lock(&si->lock);
 978                        goto checks;
 979                }
 980                if (unlikely(--latency_ration < 0)) {
 981                        cond_resched();
 982                        latency_ration = LATENCY_LIMIT;
 983                        scanned_many = true;
 984                }
 985                offset++;
 986        }
 987        spin_lock(&si->lock);
 988
 989no_page:
 990        si->flags -= SWP_SCANNING;
 991        return n_ret;
 992}
 993
 994static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 995{
 996        unsigned long idx;
 997        struct swap_cluster_info *ci;
 998        unsigned long offset;
 999
1000        /*
1001         * Should not even be attempting cluster allocations when huge
1002         * page swap is disabled.  Warn and fail the allocation.
1003         */
1004        if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1005                VM_WARN_ON_ONCE(1);
1006                return 0;
1007        }
1008
1009        if (cluster_list_empty(&si->free_clusters))
1010                return 0;
1011
1012        idx = cluster_list_first(&si->free_clusters);
1013        offset = idx * SWAPFILE_CLUSTER;
1014        ci = lock_cluster(si, offset);
1015        alloc_cluster(si, idx);
1016        cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1017
1018        memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1019        unlock_cluster(ci);
1020        swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1021        *slot = swp_entry(si->type, offset);
1022
1023        return 1;
1024}
1025
1026static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1027{
1028        unsigned long offset = idx * SWAPFILE_CLUSTER;
1029        struct swap_cluster_info *ci;
1030
1031        ci = lock_cluster(si, offset);
1032        memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1033        cluster_set_count_flag(ci, 0, 0);
1034        free_cluster(si, idx);
1035        unlock_cluster(ci);
1036        swap_range_free(si, offset, SWAPFILE_CLUSTER);
1037}
1038
1039int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1040{
1041        unsigned long size = swap_entry_size(entry_size);
1042        struct swap_info_struct *si, *next;
1043        long avail_pgs;
1044        int n_ret = 0;
1045        int node;
1046
1047        /* Only single cluster request supported */
1048        WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1049
1050        spin_lock(&swap_avail_lock);
1051
1052        avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1053        if (avail_pgs <= 0) {
1054                spin_unlock(&swap_avail_lock);
1055                goto noswap;
1056        }
1057
1058        n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1059
1060        atomic_long_sub(n_goal * size, &nr_swap_pages);
1061
1062start_over:
1063        node = numa_node_id();
1064        plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1065                /* requeue si to after same-priority siblings */
1066                plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1067                spin_unlock(&swap_avail_lock);
1068                spin_lock(&si->lock);
1069                if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1070                        spin_lock(&swap_avail_lock);
1071                        if (plist_node_empty(&si->avail_lists[node])) {
1072                                spin_unlock(&si->lock);
1073                                goto nextsi;
1074                        }
1075                        WARN(!si->highest_bit,
1076                             "swap_info %d in list but !highest_bit\n",
1077                             si->type);
1078                        WARN(!(si->flags & SWP_WRITEOK),
1079                             "swap_info %d in list but !SWP_WRITEOK\n",
1080                             si->type);
1081                        __del_from_avail_list(si);
1082                        spin_unlock(&si->lock);
1083                        goto nextsi;
1084                }
1085                if (size == SWAPFILE_CLUSTER) {
1086                        if (si->flags & SWP_BLKDEV)
1087                                n_ret = swap_alloc_cluster(si, swp_entries);
1088                } else
1089                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1090                                                    n_goal, swp_entries);
1091                spin_unlock(&si->lock);
1092                if (n_ret || size == SWAPFILE_CLUSTER)
1093                        goto check_out;
1094                pr_debug("scan_swap_map of si %d failed to find offset\n",
1095                        si->type);
1096
1097                spin_lock(&swap_avail_lock);
1098nextsi:
1099                /*
1100                 * if we got here, it's likely that si was almost full before,
1101                 * and since scan_swap_map_slots() can drop the si->lock,
1102                 * multiple callers probably all tried to get a page from the
1103                 * same si and it filled up before we could get one; or, the si
1104                 * filled up between us dropping swap_avail_lock and taking
1105                 * si->lock. Since we dropped the swap_avail_lock, the
1106                 * swap_avail_head list may have been modified; so if next is
1107                 * still in the swap_avail_head list then try it, otherwise
1108                 * start over if we have not gotten any slots.
1109                 */
1110                if (plist_node_empty(&next->avail_lists[node]))
1111                        goto start_over;
1112        }
1113
1114        spin_unlock(&swap_avail_lock);
1115
1116check_out:
1117        if (n_ret < n_goal)
1118                atomic_long_add((long)(n_goal - n_ret) * size,
1119                                &nr_swap_pages);
1120noswap:
1121        return n_ret;
1122}
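
/*
 * A rough sketch of how this is typically driven from the swap slot cache
 * layer (see mm/swap_slots.c); the entry variable is illustrative:
 *
 *	swp_entry_t entry;
 *
 *	get_swap_pages(1, &entry, 1);			one order-0 slot
 *	get_swap_pages(1, &entry, HPAGE_PMD_NR);	one whole cluster (THP)
 */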
1123
1124static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1125{
1126        struct swap_info_struct *p;
1127        unsigned long offset;
1128
1129        if (!entry.val)
1130                goto out;
1131        p = swp_swap_info(entry);
1132        if (!p)
1133                goto bad_nofile;
1134        if (data_race(!(p->flags & SWP_USED)))
1135                goto bad_device;
1136        offset = swp_offset(entry);
1137        if (offset >= p->max)
1138                goto bad_offset;
1139        return p;
1140
1141bad_offset:
1142        pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1143        goto out;
1144bad_device:
1145        pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1146        goto out;
1147bad_nofile:
1148        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1149out:
1150        return NULL;
1151}
1152
1153static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1154{
1155        struct swap_info_struct *p;
1156
1157        p = __swap_info_get(entry);
1158        if (!p)
1159                goto out;
1160        if (data_race(!p->swap_map[swp_offset(entry)]))
1161                goto bad_free;
1162        return p;
1163
1164bad_free:
1165        pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1166out:
1167        return NULL;
1168}
1169
1170static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1171{
1172        struct swap_info_struct *p;
1173
1174        p = _swap_info_get(entry);
1175        if (p)
1176                spin_lock(&p->lock);
1177        return p;
1178}
1179
1180static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1181                                        struct swap_info_struct *q)
1182{
1183        struct swap_info_struct *p;
1184
1185        p = _swap_info_get(entry);
1186
1187        if (p != q) {
1188                if (q != NULL)
1189                        spin_unlock(&q->lock);
1190                if (p != NULL)
1191                        spin_lock(&p->lock);
1192        }
1193        return p;
1194}
1195
1196static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1197                                              unsigned long offset,
1198                                              unsigned char usage)
1199{
1200        unsigned char count;
1201        unsigned char has_cache;
1202
1203        count = p->swap_map[offset];
1204
1205        has_cache = count & SWAP_HAS_CACHE;
1206        count &= ~SWAP_HAS_CACHE;
1207
1208        if (usage == SWAP_HAS_CACHE) {
1209                VM_BUG_ON(!has_cache);
1210                has_cache = 0;
1211        } else if (count == SWAP_MAP_SHMEM) {
1212                /*
1213                 * Or we could insist on shmem.c using a special
1214                 * swap_shmem_free() and free_shmem_swap_and_cache()...
1215                 */
1216                count = 0;
1217        } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1218                if (count == COUNT_CONTINUED) {
1219                        if (swap_count_continued(p, offset, count))
1220                                count = SWAP_MAP_MAX | COUNT_CONTINUED;
1221                        else
1222                                count = SWAP_MAP_MAX;
1223                } else
1224                        count--;
1225        }
1226
1227        usage = count | has_cache;
1228        if (usage)
1229                WRITE_ONCE(p->swap_map[offset], usage);
1230        else
1231                WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1232
1233        return usage;
1234}
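
/*
 * To make the swap_map encoding concrete: each entry is a single byte.
 * The low bits hold the map count (up to SWAP_MAP_MAX), SWAP_HAS_CACHE
 * (0x40) is set while the slot sits in the swap cache, and COUNT_CONTINUED
 * (0x80) marks counts that overflow into continuation pages.  For example,
 * a slot referenced by two page tables and present in the swap cache reads
 * as 0x42; __swap_entry_free_locked(p, offset, SWAP_HAS_CACHE) on it
 * leaves 0x02.
 */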
1235
1236/*
 1237 * Check whether the swap entry is valid in the swap device.  If so,
 1238 * return a pointer to the swap_info_struct, and keep the swap entry valid
 1239 * by preventing the swap device from being swapped off, until
 1240 * put_swap_device() is called.  Otherwise return NULL.
1241 *
1242 * Notice that swapoff or swapoff+swapon can still happen before the
1243 * percpu_ref_tryget_live() in get_swap_device() or after the
1244 * percpu_ref_put() in put_swap_device() if there isn't any other way
1245 * to prevent swapoff, such as page lock, page table lock, etc.  The
1246 * caller must be prepared for that.  For example, the following
1247 * situation is possible.
1248 *
1249 *   CPU1                               CPU2
1250 *   do_swap_page()
1251 *     ...                              swapoff+swapon
1252 *     __read_swap_cache_async()
1253 *       swapcache_prepare()
1254 *         __swap_duplicate()
1255 *           // check swap_map
1256 *     // verify PTE not changed
1257 *
 1258 * In __swap_duplicate(), the swap_map needs to be checked before being
 1259 * changed, partly because the specified swap entry may be for another
 1260 * swap device which has been swapped off.  And in do_swap_page(), after
 1261 * the page is read from the swap device, the PTE is verified as unchanged
 1262 * with the page table locked, to check whether the swap device has been
 1263 * swapped off or swapped off and then back on.
1264 */
1265struct swap_info_struct *get_swap_device(swp_entry_t entry)
1266{
1267        struct swap_info_struct *si;
1268        unsigned long offset;
1269
1270        if (!entry.val)
1271                goto out;
1272        si = swp_swap_info(entry);
1273        if (!si)
1274                goto bad_nofile;
1275        if (!percpu_ref_tryget_live(&si->users))
1276                goto out;
1277        /*
1278         * Guarantee the si->users are checked before accessing other
1279         * fields of swap_info_struct.
1280         *
1281         * Paired with the spin_unlock() after setup_swap_info() in
1282         * enable_swap_info().
1283         */
1284        smp_rmb();
1285        offset = swp_offset(entry);
1286        if (offset >= si->max)
1287                goto put_out;
1288
1289        return si;
1290bad_nofile:
1291        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1292out:
1293        return NULL;
1294put_out:
1295        percpu_ref_put(&si->users);
1296        return NULL;
1297}
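
/*
 * A minimal usage sketch, mirroring __swap_count() below: pin the device,
 * perform the now-safe read, then drop the reference (locals are
 * illustrative):
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		count = swap_count(si->swap_map[swp_offset(entry)]);
 *		put_swap_device(si);
 *	}
 */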
1298
1299static unsigned char __swap_entry_free(struct swap_info_struct *p,
1300                                       swp_entry_t entry)
1301{
1302        struct swap_cluster_info *ci;
1303        unsigned long offset = swp_offset(entry);
1304        unsigned char usage;
1305
1306        ci = lock_cluster_or_swap_info(p, offset);
1307        usage = __swap_entry_free_locked(p, offset, 1);
1308        unlock_cluster_or_swap_info(p, ci);
1309        if (!usage)
1310                free_swap_slot(entry);
1311
1312        return usage;
1313}
1314
1315static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1316{
1317        struct swap_cluster_info *ci;
1318        unsigned long offset = swp_offset(entry);
1319        unsigned char count;
1320
1321        ci = lock_cluster(p, offset);
1322        count = p->swap_map[offset];
1323        VM_BUG_ON(count != SWAP_HAS_CACHE);
1324        p->swap_map[offset] = 0;
1325        dec_cluster_info_page(p, p->cluster_info, offset);
1326        unlock_cluster(ci);
1327
1328        mem_cgroup_uncharge_swap(entry, 1);
1329        swap_range_free(p, offset, 1);
1330}
1331
1332/*
1333 * Caller has made sure that the swap device corresponding to entry
1334 * is still around or has not been recycled.
1335 */
1336void swap_free(swp_entry_t entry)
1337{
1338        struct swap_info_struct *p;
1339
1340        p = _swap_info_get(entry);
1341        if (p)
1342                __swap_entry_free(p, entry);
1343}
1344
1345/*
 1346 * Called after dropping swapcache to decrease the refcount of swap entries.
1347 */
1348void put_swap_page(struct page *page, swp_entry_t entry)
1349{
1350        unsigned long offset = swp_offset(entry);
1351        unsigned long idx = offset / SWAPFILE_CLUSTER;
1352        struct swap_cluster_info *ci;
1353        struct swap_info_struct *si;
1354        unsigned char *map;
1355        unsigned int i, free_entries = 0;
1356        unsigned char val;
1357        int size = swap_entry_size(thp_nr_pages(page));
1358
1359        si = _swap_info_get(entry);
1360        if (!si)
1361                return;
1362
1363        ci = lock_cluster_or_swap_info(si, offset);
1364        if (size == SWAPFILE_CLUSTER) {
1365                VM_BUG_ON(!cluster_is_huge(ci));
1366                map = si->swap_map + offset;
1367                for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1368                        val = map[i];
1369                        VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1370                        if (val == SWAP_HAS_CACHE)
1371                                free_entries++;
1372                }
1373                cluster_clear_huge(ci);
1374                if (free_entries == SWAPFILE_CLUSTER) {
1375                        unlock_cluster_or_swap_info(si, ci);
1376                        spin_lock(&si->lock);
1377                        mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1378                        swap_free_cluster(si, idx);
1379                        spin_unlock(&si->lock);
1380                        return;
1381                }
1382        }
1383        for (i = 0; i < size; i++, entry.val++) {
1384                if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1385                        unlock_cluster_or_swap_info(si, ci);
1386                        free_swap_slot(entry);
1387                        if (i == size - 1)
1388                                return;
1389                        lock_cluster_or_swap_info(si, offset);
1390                }
1391        }
1392        unlock_cluster_or_swap_info(si, ci);
1393}
1394
1395#ifdef CONFIG_THP_SWAP
1396int split_swap_cluster(swp_entry_t entry)
1397{
1398        struct swap_info_struct *si;
1399        struct swap_cluster_info *ci;
1400        unsigned long offset = swp_offset(entry);
1401
1402        si = _swap_info_get(entry);
1403        if (!si)
1404                return -EBUSY;
1405        ci = lock_cluster(si, offset);
1406        cluster_clear_huge(ci);
1407        unlock_cluster(ci);
1408        return 0;
1409}
1410#endif
1411
1412static int swp_entry_cmp(const void *ent1, const void *ent2)
1413{
1414        const swp_entry_t *e1 = ent1, *e2 = ent2;
1415
1416        return (int)swp_type(*e1) - (int)swp_type(*e2);
1417}
1418
1419void swapcache_free_entries(swp_entry_t *entries, int n)
1420{
1421        struct swap_info_struct *p, *prev;
1422        int i;
1423
1424        if (n <= 0)
1425                return;
1426
1427        prev = NULL;
1428        p = NULL;
1429
1430        /*
1431         * Sort swap entries by swap device, so each lock is only taken once.
1432         * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1433         * so low that it isn't necessary to optimize further.
1434         */
1435        if (nr_swapfiles > 1)
1436                sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1437        for (i = 0; i < n; ++i) {
1438                p = swap_info_get_cont(entries[i], prev);
1439                if (p)
1440                        swap_entry_free(p, entries[i]);
1441                prev = p;
1442        }
1443        if (p)
1444                spin_unlock(&p->lock);
1445}
1446
1447/*
1448 * How many references to page are currently swapped out?
1449 * This does not give an exact answer when swap count is continued,
1450 * but does include the high COUNT_CONTINUED flag to allow for that.
1451 */
1452int page_swapcount(struct page *page)
1453{
1454        int count = 0;
1455        struct swap_info_struct *p;
1456        struct swap_cluster_info *ci;
1457        swp_entry_t entry;
1458        unsigned long offset;
1459
1460        entry.val = page_private(page);
1461        p = _swap_info_get(entry);
1462        if (p) {
1463                offset = swp_offset(entry);
1464                ci = lock_cluster_or_swap_info(p, offset);
1465                count = swap_count(p->swap_map[offset]);
1466                unlock_cluster_or_swap_info(p, ci);
1467        }
1468        return count;
1469}
1470
1471int __swap_count(swp_entry_t entry)
1472{
1473        struct swap_info_struct *si;
1474        pgoff_t offset = swp_offset(entry);
1475        int count = 0;
1476
1477        si = get_swap_device(entry);
1478        if (si) {
1479                count = swap_count(si->swap_map[offset]);
1480                put_swap_device(si);
1481        }
1482        return count;
1483}
1484
1485static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1486{
1487        int count = 0;
1488        pgoff_t offset = swp_offset(entry);
1489        struct swap_cluster_info *ci;
1490
1491        ci = lock_cluster_or_swap_info(si, offset);
1492        count = swap_count(si->swap_map[offset]);
1493        unlock_cluster_or_swap_info(si, ci);
1494        return count;
1495}
1496
1497/*
1498 * How many references to @entry are currently swapped out?
1499 * This does not give an exact answer when swap count is continued,
1500 * but does include the high COUNT_CONTINUED flag to allow for that.
1501 */
1502int __swp_swapcount(swp_entry_t entry)
1503{
1504        int count = 0;
1505        struct swap_info_struct *si;
1506
1507        si = get_swap_device(entry);
1508        if (si) {
1509                count = swap_swapcount(si, entry);
1510                put_swap_device(si);
1511        }
1512        return count;
1513}
1514
1515/*
1516 * How many references to @entry are currently swapped out?
1517 * This considers COUNT_CONTINUED so it returns exact answer.
1518 */
1519int swp_swapcount(swp_entry_t entry)
1520{
1521        int count, tmp_count, n;
1522        struct swap_info_struct *p;
1523        struct swap_cluster_info *ci;
1524        struct page *page;
1525        pgoff_t offset;
1526        unsigned char *map;
1527
1528        p = _swap_info_get(entry);
1529        if (!p)
1530                return 0;
1531
1532        offset = swp_offset(entry);
1533
1534        ci = lock_cluster_or_swap_info(p, offset);
1535
1536        count = swap_count(p->swap_map[offset]);
1537        if (!(count & COUNT_CONTINUED))
1538                goto out;
1539
1540        count &= ~COUNT_CONTINUED;
1541        n = SWAP_MAP_MAX + 1;
1542
1543        page = vmalloc_to_page(p->swap_map + offset);
1544        offset &= ~PAGE_MASK;
1545        VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1546
1547        do {
1548                page = list_next_entry(page, lru);
1549                map = kmap_atomic(page);
1550                tmp_count = map[offset];
1551                kunmap_atomic(map);
1552
1553                count += (tmp_count & ~COUNT_CONTINUED) * n;
1554                n *= (SWAP_CONT_MAX + 1);
1555        } while (tmp_count & COUNT_CONTINUED);
1556out:
1557        unlock_cluster_or_swap_info(p, ci);
1558        return count;
1559}
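
/*
 * A rough sketch of the continuation arithmetic above: the in-place count
 * saturates at SWAP_MAP_MAX, and each continuation page stores a further
 * base-(SWAP_CONT_MAX + 1) "digit" for the same offset, so the exact count
 * reconstructed by swp_swapcount() is
 *
 *	count = c_base
 *	      + c_0 * (SWAP_MAP_MAX + 1)
 *	      + c_1 * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)
 *	      + c_2 * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)^2
 *	      + ...
 *
 * where c_base is the count held in swap_map[] itself and c_i is the value
 * (with COUNT_CONTINUED cleared) held in the i-th continuation page.
 */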
1560
1561static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1562                                         swp_entry_t entry)
1563{
1564        struct swap_cluster_info *ci;
1565        unsigned char *map = si->swap_map;
1566        unsigned long roffset = swp_offset(entry);
1567        unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1568        int i;
1569        bool ret = false;
1570
1571        ci = lock_cluster_or_swap_info(si, offset);
1572        if (!ci || !cluster_is_huge(ci)) {
1573                if (swap_count(map[roffset]))
1574                        ret = true;
1575                goto unlock_out;
1576        }
1577        for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1578                if (swap_count(map[offset + i])) {
1579                        ret = true;
1580                        break;
1581                }
1582        }
1583unlock_out:
1584        unlock_cluster_or_swap_info(si, ci);
1585        return ret;
1586}
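
/*
 * Note on the helper above: when the cluster backing @entry is a huge (THP)
 * cluster, any non-zero count anywhere in its SWAPFILE_CLUSTER slots means
 * the huge page is still swapped out somewhere; otherwise only the single
 * slot at the original offset (roffset) is consulted.
 */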
1587
1588static bool page_swapped(struct page *page)
1589{
1590        swp_entry_t entry;
1591        struct swap_info_struct *si;
1592
1593        if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1594                return page_swapcount(page) != 0;
1595
1596        page = compound_head(page);
1597        entry.val = page_private(page);
1598        si = _swap_info_get(entry);
1599        if (si)
1600                return swap_page_trans_huge_swapped(si, entry);
1601        return false;
1602}
1603
1604static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1605                                         int *total_swapcount)
1606{
1607        int i, map_swapcount, _total_mapcount, _total_swapcount;
1608        unsigned long offset = 0;
1609        struct swap_info_struct *si;
1610        struct swap_cluster_info *ci = NULL;
1611        unsigned char *map = NULL;
1612        int mapcount, swapcount = 0;
1613
1614        /* hugetlbfs shouldn't call it */
1615        VM_BUG_ON_PAGE(PageHuge(page), page);
1616
1617        if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1618                mapcount = page_trans_huge_mapcount(page, total_mapcount);
1619                if (PageSwapCache(page))
1620                        swapcount = page_swapcount(page);
1621                if (total_swapcount)
1622                        *total_swapcount = swapcount;
1623                return mapcount + swapcount;
1624        }
1625
1626        page = compound_head(page);
1627
1628        _total_mapcount = _total_swapcount = map_swapcount = 0;
1629        if (PageSwapCache(page)) {
1630                swp_entry_t entry;
1631
1632                entry.val = page_private(page);
1633                si = _swap_info_get(entry);
1634                if (si) {
1635                        map = si->swap_map;
1636                        offset = swp_offset(entry);
1637                }
1638        }
1639        if (map)
1640                ci = lock_cluster(si, offset);
1641        for (i = 0; i < HPAGE_PMD_NR; i++) {
1642                mapcount = atomic_read(&page[i]._mapcount) + 1;
1643                _total_mapcount += mapcount;
1644                if (map) {
1645                        swapcount = swap_count(map[offset + i]);
1646                        _total_swapcount += swapcount;
1647                }
1648                map_swapcount = max(map_swapcount, mapcount + swapcount);
1649        }
1650        unlock_cluster(ci);
1651        if (PageDoubleMap(page)) {
1652                map_swapcount -= 1;
1653                _total_mapcount -= HPAGE_PMD_NR;
1654        }
1655        mapcount = compound_mapcount(page);
1656        map_swapcount += mapcount;
1657        _total_mapcount += mapcount;
1658        if (total_mapcount)
1659                *total_mapcount = _total_mapcount;
1660        if (total_swapcount)
1661                *total_swapcount = _total_swapcount;
1662
1663        return map_swapcount;
1664}
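
/*
 * Summary of the return value above: for a THP in the swap cache this is the
 * maximum over all subpages of (subpage mapcount + subpage swap count), plus
 * the compound mapcount, with one reference discounted when the page is
 * PageDoubleMap (mapped both as a huge PMD and as individual PTEs).
 */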
1665
1666/*
1667 * We can write to an anon page without COW if there are no other references
1668 * to it.  And as a side-effect, free up its swap: because the old content
1669 * on disk will never be read, and seeking back there to write new content
1670 * later would only waste time away from clustering.
1671 *
1672 * NOTE: total_map_swapcount should not be relied upon by the caller if
1673 * reuse_swap_page() returns false, although it may still be overwritten even
1674 * in that case (see the other implementation for CONFIG_SWAP=n).
1675 */
1676bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1677{
1678        int count, total_mapcount, total_swapcount;
1679
1680        VM_BUG_ON_PAGE(!PageLocked(page), page);
1681        if (unlikely(PageKsm(page)))
1682                return false;
1683        count = page_trans_huge_map_swapcount(page, &total_mapcount,
1684                                              &total_swapcount);
1685        if (total_map_swapcount)
1686                *total_map_swapcount = total_mapcount + total_swapcount;
1687        if (count == 1 && PageSwapCache(page) &&
1688            (likely(!PageTransCompound(page)) ||
1689             /* The remaining swap count will be freed soon */
1690             total_swapcount == page_swapcount(page))) {
1691                if (!PageWriteback(page)) {
1692                        page = compound_head(page);
1693                        delete_from_swap_cache(page);
1694                        SetPageDirty(page);
1695                } else {
1696                        swp_entry_t entry;
1697                        struct swap_info_struct *p;
1698
1699                        entry.val = page_private(page);
1700                        p = swap_info_get(entry);
1701                        if (p->flags & SWP_STABLE_WRITES) {
1702                                spin_unlock(&p->lock);
1703                                return false;
1704                        }
1705                        spin_unlock(&p->lock);
1706                }
1707        }
1708
1709        return count <= 1;
1710}
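
/*
 * Typical use of reuse_swap_page(): fault paths such as do_swap_page() call
 * it with the page locked to decide whether the faulting task is the sole
 * user and may map the page writable immediately instead of forcing a later
 * copy-on-write.
 */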
1711
1712/*
1713 * If swap is getting full, or if there are no more mappings of this page,
1714 * then try_to_free_swap is called to free its swap space.
1715 */
1716int try_to_free_swap(struct page *page)
1717{
1718        VM_BUG_ON_PAGE(!PageLocked(page), page);
1719
1720        if (!PageSwapCache(page))
1721                return 0;
1722        if (PageWriteback(page))
1723                return 0;
1724        if (page_swapped(page))
1725                return 0;
1726
1727        /*
1728         * Once hibernation has begun to create its image of memory,
1729         * there's a danger that one of the calls to try_to_free_swap()
1730         * - most probably a call from __try_to_reclaim_swap() while
1731         * hibernation is allocating its own swap pages for the image,
1732         * but conceivably even a call from memory reclaim - will free
1733         * the swap from a page which has already been recorded in the
1734         * image as a clean swapcache page, and then reuse its swap for
1735         * another page of the image.  On waking from hibernation, the
1736         * original page might be freed under memory pressure, then
1737         * later read back in from swap, now with the wrong data.
1738         *
1739         * Hibernation suspends storage while it is writing the image
1740         * to disk so check that here.
1741         */
1742        if (pm_suspended_storage())
1743                return 0;
1744
1745        page = compound_head(page);
1746        delete_from_swap_cache(page);
1747        SetPageDirty(page);
1748        return 1;
1749}
1750
1751/*
1752 * Free the swap entry like above, but also try to
1753 * reclaim the swap cache page if the swap cache was its only remaining user.
1754 */
1755int free_swap_and_cache(swp_entry_t entry)
1756{
1757        struct swap_info_struct *p;
1758        unsigned char count;
1759
1760        if (non_swap_entry(entry))
1761                return 1;
1762
1763        p = _swap_info_get(entry);
1764        if (p) {
1765                count = __swap_entry_free(p, entry);
1766                if (count == SWAP_HAS_CACHE &&
1767                    !swap_page_trans_huge_swapped(p, entry))
1768                        __try_to_reclaim_swap(p, swp_offset(entry),
1769                                              TTRS_UNMAPPED | TTRS_FULL);
1770        }
1771        return p != NULL;
1772}
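
/*
 * Illustrative caller pattern (a sketch only, not copied from any particular
 * call site): when tearing down a swap pte, the owner drops its reference on
 * the entry and lets the swap cache page be reclaimed if nothing else uses it:
 *
 *	entry = pte_to_swp_entry(ptent);
 *	if (!non_swap_entry(entry))
 *		free_swap_and_cache(entry);
 *	pte_clear(mm, addr, pte);
 */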
1773
1774#ifdef CONFIG_HIBERNATION
1775
1776swp_entry_t get_swap_page_of_type(int type)
1777{
1778        struct swap_info_struct *si = swap_type_to_swap_info(type);
1779        swp_entry_t entry = {0};
1780
1781        if (!si)
1782                goto fail;
1783
1784        /* This is called for allocating swap entry, not cache */
1785        spin_lock(&si->lock);
1786        if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1787                atomic_long_dec(&nr_swap_pages);
1788        spin_unlock(&si->lock);
1789fail:
1790        return entry;
1791}
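
/*
 * Unlike get_swap_page(), the hibernation helper above deliberately bypasses
 * the per-cpu swap slot cache and the swap_avail_heads plists: the image must
 * be written to one specific device, so the slot is allocated directly with
 * scan_swap_map_slots() under si->lock.
 */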
1792
1793/*
1794 * Find the swap type that corresponds to given device (if any).
1795 *
1796 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1797 * from 0, in which the swap header is expected to be located.
1798 *
1799 * This is needed for the suspend to disk (aka swsusp).
1800 */
1801int swap_type_of(dev_t device, sector_t offset)
1802{
1803        int type;
1804
1805        if (!device)
1806                return -1;
1807
1808        spin_lock(&swap_lock);
1809        for (type = 0; type < nr_swapfiles; type++) {
1810                struct swap_info_struct *sis = swap_info[type];
1811
1812                if (!(sis->flags & SWP_WRITEOK))
1813                        continue;
1814
1815                if (device == sis->bdev->bd_dev) {
1816                        struct swap_extent *se = first_se(sis);
1817
1818                        if (se->start_block == offset) {
1819                                spin_unlock(&swap_lock);
1820                                return type;
1821                        }
1822                }
1823        }
1824        spin_unlock(&swap_lock);
1825        return -ENODEV;
1826}
1827
1828int find_first_swap(dev_t *device)
1829{
1830        int type;
1831
1832        spin_lock(&swap_lock);
1833        for (type = 0; type < nr_swapfiles; type++) {
1834                struct swap_info_struct *sis = swap_info[type];
1835
1836                if (!(sis->flags & SWP_WRITEOK))
1837                        continue;
1838                *device = sis->bdev->bd_dev;
1839                spin_unlock(&swap_lock);
1840                return type;
1841        }
1842        spin_unlock(&swap_lock);
1843        return -ENODEV;
1844}
1845
1846/*
1847 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1848 * corresponding to given index in swap_info (swap type).
1849 */
1850sector_t swapdev_block(int type, pgoff_t offset)
1851{
1852        struct swap_info_struct *si = swap_type_to_swap_info(type);
1853        struct swap_extent *se;
1854
1855        if (!si || !(si->flags & SWP_WRITEOK))
1856                return 0;
1857        se = offset_to_swap_extent(si, offset);
1858        return se->start_block + (offset - se->start_page);
1859}
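
/*
 * Worked example for swapdev_block(): if the extent covering @offset has
 * start_page == 1024, nr_pages == 256 and start_block == 52480, then offset
 * 1030 maps to block 52480 + (1030 - 1024) == 52486 (both in PAGE_SIZE
 * units, see the swap extent comments further below).
 */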
1860
1861/*
1862 * Return either the total number of swap pages of given type, or the number
1863 * of free pages of that type (depending on @free)
1864 *
1865 * This is needed for software suspend
1866 */
1867unsigned int count_swap_pages(int type, int free)
1868{
1869        unsigned int n = 0;
1870
1871        spin_lock(&swap_lock);
1872        if ((unsigned int)type < nr_swapfiles) {
1873                struct swap_info_struct *sis = swap_info[type];
1874
1875                spin_lock(&sis->lock);
1876                if (sis->flags & SWP_WRITEOK) {
1877                        n = sis->pages;
1878                        if (free)
1879                                n -= sis->inuse_pages;
1880                }
1881                spin_unlock(&sis->lock);
1882        }
1883        spin_unlock(&swap_lock);
1884        return n;
1885}
1886#endif /* CONFIG_HIBERNATION */
1887
1888static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1889{
1890        return pte_same(pte_swp_clear_flags(pte), swp_pte);
1891}
1892
1893/*
1894 * No need to decide whether this PTE shares the swap entry with others,
1895 * just let do_wp_page work it out if a write is requested later - to
1896 * force COW, vm_page_prot omits write permission from any private vma.
1897 */
1898static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1899                unsigned long addr, swp_entry_t entry, struct page *page)
1900{
1901        struct page *swapcache;
1902        spinlock_t *ptl;
1903        pte_t *pte;
1904        int ret = 1;
1905
1906        swapcache = page;
1907        page = ksm_might_need_to_copy(page, vma, addr);
1908        if (unlikely(!page))
1909                return -ENOMEM;
1910
1911        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1912        if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1913                ret = 0;
1914                goto out;
1915        }
1916
1917        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1918        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1919        get_page(page);
1920        set_pte_at(vma->vm_mm, addr, pte,
1921                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1922        if (page == swapcache) {
1923                page_add_anon_rmap(page, vma, addr, false);
1924        } else { /* ksm created a completely new copy */
1925                page_add_new_anon_rmap(page, vma, addr, false);
1926                lru_cache_add_inactive_or_unevictable(page, vma);
1927        }
1928        swap_free(entry);
1929out:
1930        pte_unmap_unlock(pte, ptl);
1931        if (page != swapcache) {
1932                unlock_page(page);
1933                put_page(page);
1934        }
1935        return ret;
1936}
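
/*
 * Note on the pte_same_as_swp() check above: the page was read in and locked
 * without the page table lock held, so the pte may have been changed (or
 * zapped) in the meantime; in that case unuse_pte() backs off and returns 0,
 * and the caller simply moves on to the next pte.
 */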
1937
1938static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1939                        unsigned long addr, unsigned long end,
1940                        unsigned int type, bool frontswap,
1941                        unsigned long *fs_pages_to_unuse)
1942{
1943        struct page *page;
1944        swp_entry_t entry;
1945        pte_t *pte;
1946        struct swap_info_struct *si;
1947        unsigned long offset;
1948        int ret = 0;
1949        volatile unsigned char *swap_map;
1950
1951        si = swap_info[type];
1952        pte = pte_offset_map(pmd, addr);
1953        do {
1954                if (!is_swap_pte(*pte))
1955                        continue;
1956
1957                entry = pte_to_swp_entry(*pte);
1958                if (swp_type(entry) != type)
1959                        continue;
1960
1961                offset = swp_offset(entry);
1962                if (frontswap && !frontswap_test(si, offset))
1963                        continue;
1964
1965                pte_unmap(pte);
1966                swap_map = &si->swap_map[offset];
1967                page = lookup_swap_cache(entry, vma, addr);
1968                if (!page) {
1969                        struct vm_fault vmf = {
1970                                .vma = vma,
1971                                .address = addr,
1972                                .pmd = pmd,
1973                        };
1974
1975                        page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1976                                                &vmf);
1977                }
1978                if (!page) {
1979                        if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1980                                goto try_next;
1981                        return -ENOMEM;
1982                }
1983
1984                lock_page(page);
1985                wait_on_page_writeback(page);
1986                ret = unuse_pte(vma, pmd, addr, entry, page);
1987                if (ret < 0) {
1988                        unlock_page(page);
1989                        put_page(page);
1990                        goto out;
1991                }
1992
1993                try_to_free_swap(page);
1994                unlock_page(page);
1995                put_page(page);
1996
1997                if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1998                        ret = FRONTSWAP_PAGES_UNUSED;
1999                        goto out;
2000                }
2001try_next:
2002                pte = pte_offset_map(pmd, addr);
2003        } while (pte++, addr += PAGE_SIZE, addr != end);
2004        pte_unmap(pte - 1);
2005
2006        ret = 0;
2007out:
2008        return ret;
2009}
2010
2011static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2012                                unsigned long addr, unsigned long end,
2013                                unsigned int type, bool frontswap,
2014                                unsigned long *fs_pages_to_unuse)
2015{
2016        pmd_t *pmd;
2017        unsigned long next;
2018        int ret;
2019
2020        pmd = pmd_offset(pud, addr);
2021        do {
2022                cond_resched();
2023                next = pmd_addr_end(addr, end);
2024                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
2025                        continue;
2026                ret = unuse_pte_range(vma, pmd, addr, next, type,
2027                                      frontswap, fs_pages_to_unuse);
2028                if (ret)
2029                        return ret;
2030        } while (pmd++, addr = next, addr != end);
2031        return 0;
2032}
2033
2034static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2035                                unsigned long addr, unsigned long end,
2036                                unsigned int type, bool frontswap,
2037                                unsigned long *fs_pages_to_unuse)
2038{
2039        pud_t *pud;
2040        unsigned long next;
2041        int ret;
2042
2043        pud = pud_offset(p4d, addr);
2044        do {
2045                next = pud_addr_end(addr, end);
2046                if (pud_none_or_clear_bad(pud))
2047                        continue;
2048                ret = unuse_pmd_range(vma, pud, addr, next, type,
2049                                      frontswap, fs_pages_to_unuse);
2050                if (ret)
2051                        return ret;
2052        } while (pud++, addr = next, addr != end);
2053        return 0;
2054}
2055
2056static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2057                                unsigned long addr, unsigned long end,
2058                                unsigned int type, bool frontswap,
2059                                unsigned long *fs_pages_to_unuse)
2060{
2061        p4d_t *p4d;
2062        unsigned long next;
2063        int ret;
2064
2065        p4d = p4d_offset(pgd, addr);
2066        do {
2067                next = p4d_addr_end(addr, end);
2068                if (p4d_none_or_clear_bad(p4d))
2069                        continue;
2070                ret = unuse_pud_range(vma, p4d, addr, next, type,
2071                                      frontswap, fs_pages_to_unuse);
2072                if (ret)
2073                        return ret;
2074        } while (p4d++, addr = next, addr != end);
2075        return 0;
2076}
2077
2078static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2079                     bool frontswap, unsigned long *fs_pages_to_unuse)
2080{
2081        pgd_t *pgd;
2082        unsigned long addr, end, next;
2083        int ret;
2084
2085        addr = vma->vm_start;
2086        end = vma->vm_end;
2087
2088        pgd = pgd_offset(vma->vm_mm, addr);
2089        do {
2090                next = pgd_addr_end(addr, end);
2091                if (pgd_none_or_clear_bad(pgd))
2092                        continue;
2093                ret = unuse_p4d_range(vma, pgd, addr, next, type,
2094                                      frontswap, fs_pages_to_unuse);
2095                if (ret)
2096                        return ret;
2097        } while (pgd++, addr = next, addr != end);
2098        return 0;
2099}
2100
2101static int unuse_mm(struct mm_struct *mm, unsigned int type,
2102                    bool frontswap, unsigned long *fs_pages_to_unuse)
2103{
2104        struct vm_area_struct *vma;
2105        int ret = 0;
2106
2107        mmap_read_lock(mm);
2108        for (vma = mm->mmap; vma; vma = vma->vm_next) {
2109                if (vma->anon_vma) {
2110                        ret = unuse_vma(vma, type, frontswap,
2111                                        fs_pages_to_unuse);
2112                        if (ret)
2113                                break;
2114                }
2115                cond_resched();
2116        }
2117        mmap_read_unlock(mm);
2118        return ret;
2119}
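
/*
 * The unuse_* helpers above walk the full page table hierarchy of each
 * anonymous VMA (pgd -> p4d -> pud -> pmd -> pte), looking only for swap ptes
 * of the given swap type and bringing the corresponding pages back in via
 * unuse_pte_range(). Only VMAs with an anon_vma are visited, since the
 * private swap entries targeted here are only found under anonymous mappings
 * (shmem is handled separately by shmem_unuse()).
 */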
2120
2121/*
2122 * Scan swap_map (or frontswap_map if frontswap parameter is true)
2123 * from current position to next entry still in use. Return 0
2124 * if there are no in-use entries after prev, up to the end of the map.
2125 */
2126static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2127                                        unsigned int prev, bool frontswap)
2128{
2129        unsigned int i;
2130        unsigned char count;
2131
2132        /*
2133         * No need for swap_lock here: we're just looking
2134         * for whether an entry is in use, not modifying it; false
2135         * hits are okay, and sys_swapoff() has already prevented new
2136         * allocations from this area (while holding swap_lock).
2137         */
2138        for (i = prev + 1; i < si->max; i++) {
2139                count = READ_ONCE(si->swap_map[i]);
2140                if (count && swap_count(count) != SWAP_MAP_BAD)
2141                        if (!frontswap || frontswap_test(si, i))
2142                                break;
2143                if ((i % LATENCY_LIMIT) == 0)
2144                        cond_resched();
2145        }
2146
2147        if (i == si->max)
2148                i = 0;
2149
2150        return i;
2151}
2152
2153/*
2154 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2155 * pages_to_unuse==0 means all pages; ignored if frontswap is false
2156 */
2157int try_to_unuse(unsigned int type, bool frontswap,
2158                 unsigned long pages_to_unuse)
2159{
2160        struct mm_struct *prev_mm;
2161        struct mm_struct *mm;
2162        struct list_head *p;
2163        int retval = 0;
2164        struct swap_info_struct *si = swap_info[type];
2165        struct page *page;
2166        swp_entry_t entry;
2167        unsigned int i;
2168
2169        if (!READ_ONCE(si->inuse_pages))
2170                return 0;
2171
2172        if (!frontswap)
2173                pages_to_unuse = 0;
2174
2175retry:
2176        retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2177        if (retval)
2178                goto out;
2179
2180        prev_mm = &init_mm;
2181        mmget(prev_mm);
2182
2183        spin_lock(&mmlist_lock);
2184        p = &init_mm.mmlist;
2185        while (READ_ONCE(si->inuse_pages) &&
2186               !signal_pending(current) &&
2187               (p = p->next) != &init_mm.mmlist) {
2188
2189                mm = list_entry(p, struct mm_struct, mmlist);
2190                if (!mmget_not_zero(mm))
2191                        continue;
2192                spin_unlock(&mmlist_lock);
2193                mmput(prev_mm);
2194                prev_mm = mm;
2195                retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2196
2197                if (retval) {
2198                        mmput(prev_mm);
2199                        goto out;
2200                }
2201
2202                /*
2203                 * Make sure that we aren't completely killing
2204                 * interactive performance.
2205                 */
2206                cond_resched();
2207                spin_lock(&mmlist_lock);
2208        }
2209        spin_unlock(&mmlist_lock);
2210
2211        mmput(prev_mm);
2212
2213        i = 0;
2214        while (READ_ONCE(si->inuse_pages) &&
2215               !signal_pending(current) &&
2216               (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2217
2218                entry = swp_entry(type, i);
2219                page = find_get_page(swap_address_space(entry), i);
2220                if (!page)
2221                        continue;
2222
2223                /*
2224                 * It is conceivable that a racing task removed this page from
2225                 * swap cache just before we acquired the page lock. The page
2226                 * might even be back in swap cache on another swap area. But
2227                 * that is okay, try_to_free_swap() only removes stale pages.
2228                 */
2229                lock_page(page);
2230                wait_on_page_writeback(page);
2231                try_to_free_swap(page);
2232                unlock_page(page);
2233                put_page(page);
2234
2235                /*
2236                 * For frontswap, we just need to unuse pages_to_unuse, if
2237                 * it was specified. Need not check frontswap again here as
2238                 * we already zeroed out pages_to_unuse if not frontswap.
2239                 */
2240                if (pages_to_unuse && --pages_to_unuse == 0)
2241                        goto out;
2242        }
2243
2244        /*
2245         * Let's check again to see if there are still swap entries in the map.
2246         * If so, we need to retry the unuse logic.
2247         * Under global memory pressure, swap entries can be reinserted back
2248         * into process space after the mmlist loop above passes over them.
2249         *
2250         * Limit the number of retries? No: when mmget_not_zero() above fails,
2251         * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2252         * at its own independent pace; and even shmem_writepage() could have
2253         * been preempted after get_swap_page(), temporarily hiding that swap.
2254         * It's easy and robust (though cpu-intensive) just to keep retrying.
2255         */
2256        if (READ_ONCE(si->inuse_pages)) {
2257                if (!signal_pending(current))
2258                        goto retry;
2259                retval = -EINTR;
2260        }
2261out:
2262        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2263}
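
/*
 * In short, try_to_unuse() proceeds in three steps: (1) shmem_unuse() for
 * tmpfs/shmem owned entries, (2) a walk of every mm on init_mm.mmlist,
 * faulting this swap type's entries back into the process page tables, and
 * (3) a sweep of any pages left only in the swap cache. It then retries from
 * the top while si->inuse_pages is still non-zero, unless a signal is pending.
 */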
2264
2265/*
2266 * After a successful try_to_unuse, if no swap is now in use, we know
2267 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2268 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2269 * added to the mmlist just after swap_duplicate() - before would be racy.
2270 */
2271static void drain_mmlist(void)
2272{
2273        struct list_head *p, *next;
2274        unsigned int type;
2275
2276        for (type = 0; type < nr_swapfiles; type++)
2277                if (swap_info[type]->inuse_pages)
2278                        return;
2279        spin_lock(&mmlist_lock);
2280        list_for_each_safe(p, next, &init_mm.mmlist)
2281                list_del_init(p);
2282        spin_unlock(&mmlist_lock);
2283}
2284
2285/*
2286 * Free all of a swapdev's extent information
2287 */
2288static void destroy_swap_extents(struct swap_info_struct *sis)
2289{
2290        while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2291                struct rb_node *rb = sis->swap_extent_root.rb_node;
2292                struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2293
2294                rb_erase(rb, &sis->swap_extent_root);
2295                kfree(se);
2296        }
2297
2298        if (sis->flags & SWP_ACTIVATED) {
2299                struct file *swap_file = sis->swap_file;
2300                struct address_space *mapping = swap_file->f_mapping;
2301
2302                sis->flags &= ~SWP_ACTIVATED;
2303                if (mapping->a_ops->swap_deactivate)
2304                        mapping->a_ops->swap_deactivate(swap_file);
2305        }
2306}
2307
2308/*
2309 * Add a block range (and the corresponding page range) into this swapdev's
2310 * extent tree.
2311 *
2312 * This function rather assumes that it is called in ascending page order.
2313 */
2314int
2315add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2316                unsigned long nr_pages, sector_t start_block)
2317{
2318        struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2319        struct swap_extent *se;
2320        struct swap_extent *new_se;
2321
2322        /*
2323         * Place the new node at the rightmost position, since this
2324         * function is called in ascending page order.
2325         */
2326        while (*link) {
2327                parent = *link;
2328                link = &parent->rb_right;
2329        }
2330
2331        if (parent) {
2332                se = rb_entry(parent, struct swap_extent, rb_node);
2333                BUG_ON(se->start_page + se->nr_pages != start_page);
2334                if (se->start_block + se->nr_pages == start_block) {
2335                        /* Merge it */
2336                        se->nr_pages += nr_pages;
2337                        return 0;
2338                }
2339        }
2340
2341        /* No merge, insert a new extent. */
2342        new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2343        if (new_se == NULL)
2344                return -ENOMEM;
2345        new_se->start_page = start_page;
2346        new_se->nr_pages = nr_pages;
2347        new_se->start_block = start_block;
2348
2349        rb_link_node(&new_se->rb_node, parent, link);
2350        rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2351        return 1;
2352}
2353EXPORT_SYMBOL_GPL(add_swap_extent);
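
/*
 * Worked example of the merge logic in add_swap_extent(): with an existing
 * rightmost extent {start_page = 0, nr_pages = 256, start_block = 1000}, a
 * call for pages 256..511 at start_block 1256 extends that extent to
 * nr_pages = 512 and returns 0, whereas a call at start_block 2000 would
 * insert a new extent node and return 1.
 */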
2354
2355/*
2356 * A `swap extent' is a simple thing which maps a contiguous range of pages
2357 * onto a contiguous range of disk blocks.  An ordered list of swap extents
2358 * is built at swapon time and is then used at swap_writepage/swap_readpage
2359 * time for locating where on disk a page belongs.
2360 *
2361 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2362 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2363 * swap files identically.
2364 *
2365 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2366 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2367 * swapfiles are handled *identically* after swapon time.
2368 *
2369 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2370 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2371 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2372 * requirements, they are simply tossed out - we will never use those blocks
2373 * for swapping.
2374 *
2375 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2376 * prevents users from writing to the swap device, which will corrupt memory.
2377 *
2378 * The amount of disk space which a single swap extent represents varies.
2379 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2380 * extents in the tree.  Earlier kernels kept the extents on a list and
2381 * cached the previous search location in `curr_swap_extent'; the extents
2382 * are now kept in an rbtree indexed by start_page (swap_extent_root), so
2383 * locating the extent for a given offset is a cheap O(log n) lookup.
2384 */
2385static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2386{
2387        struct file *swap_file = sis->swap_file;
2388        struct address_space *mapping = swap_file->f_mapping;
2389        struct inode *inode = mapping->host;
2390        int ret;
2391
2392        if (S_ISBLK(inode->i_mode)) {
2393                ret = add_swap_extent(sis, 0, sis->max, 0);
2394                *span = sis->pages;
2395                return ret;
2396        }
2397
2398        if (mapping->a_ops->swap_activate) {
2399                ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2400                if (ret >= 0)
2401                        sis->flags |= SWP_ACTIVATED;
2402                if (!ret) {
2403                        sis->flags |= SWP_FS_OPS;
2404                        ret = add_swap_extent(sis, 0, sis->max, 0);
2405                        *span = sis->pages;
2406                }
2407                return ret;
2408        }
2409
2410        return generic_swapfile_activate(sis, swap_file, span);
2411}
2412
2413static int swap_node(struct swap_info_struct *p)
2414{
2415        struct block_device *bdev;
2416
2417        if (p->bdev)
2418                bdev = p->bdev;
2419        else
2420                bdev = p->swap_file->f_inode->i_sb->s_bdev;
2421
2422        return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2423}
2424
2425static void setup_swap_info(struct swap_info_struct *p, int prio,
2426                            unsigned char *swap_map,
2427                            struct swap_cluster_info *cluster_info)
2428{
2429        int i;
2430
2431        if (prio >= 0)
2432                p->prio = prio;
2433        else
2434                p->prio = --least_priority;
2435        /*
2436         * the plist prio is negated because plist ordering is
2437         * low-to-high, while swap ordering is high-to-low
2438         */
2439        p->list.prio = -p->prio;
2440        for_each_node(i) {
2441                if (p->prio >= 0)
2442                        p->avail_lists[i].prio = -p->prio;
2443                else {
2444                        if (swap_node(p) == i)
2445                                p->avail_lists[i].prio = 1;
2446                        else
2447                                p->avail_lists[i].prio = -p->prio;
2448                }
2449        }
2450        p->swap_map = swap_map;
2451        p->cluster_info = cluster_info;
2452}
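
/*
 * Example of the priority mapping done in setup_swap_info(): a swap area
 * registered with prio 5 gets plist prio -5 everywhere; an area with an
 * auto-assigned (negative) prio, say -2, gets plist prio 2 on remote nodes
 * but prio 1 on its own node, so allocations on that node prefer the local
 * device over other auto-priority devices.
 */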
2453
2454static void _enable_swap_info(struct swap_info_struct *p)
2455{
2456        p->flags |= SWP_WRITEOK;
2457        atomic_long_add(p->pages, &nr_swap_pages);
2458        total_swap_pages += p->pages;
2459
2460        assert_spin_locked(&swap_lock);
2461        /*
2462         * both lists are plists, and thus priority ordered.
2463         * swap_active_head needs to be priority ordered for swapoff(),
2464         * which on removal of any swap_info_struct with an auto-assigned
2465         * (i.e. negative) priority increments the auto-assigned priority
2466         * of any lower-priority swap_info_structs.
2467         * swap_avail_head needs to be priority ordered for get_swap_page(),
2468         * which allocates swap pages from the highest available priority
2469         * swap_info_struct.
2470         */
2471        plist_add(&p->list, &swap_active_head);
2472        add_to_avail_list(p);
2473}
2474
2475static void enable_swap_info(struct swap_info_struct *p, int prio,
2476                                unsigned char *swap_map,
2477                                struct swap_cluster_info *cluster_info,
2478                                unsigned long *frontswap_map)
2479{
2480        frontswap_init(p->type, frontswap_map);
2481        spin_lock(&swap_lock);
2482        spin_lock(&p->lock);
2483        setup_swap_info(p, prio, swap_map, cluster_info);
2484        spin_unlock(&p->lock);
2485        spin_unlock(&swap_lock);
2486        /*
2487         * Finished initializing swap device, now it's safe to reference it.
2488         */
2489        percpu_ref_resurrect(&p->users);
2490        spin_lock(&swap_lock);
2491        spin_lock(&p->lock);
2492        _enable_swap_info(p);
2493        spin_unlock(&p->lock);
2494        spin_unlock(&swap_lock);
2495}
2496
2497static void reinsert_swap_info(struct swap_info_struct *p)
2498{
2499        spin_lock(&swap_lock);
2500        spin_lock(&p->lock);
2501        setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2502        _enable_swap_info(p);
2503        spin_unlock(&p->lock);
2504        spin_unlock(&swap_lock);
2505}
2506
2507bool has_usable_swap(void)
2508{
2509        bool ret = true;
2510
2511        spin_lock(&swap_lock);
2512        if (plist_head_empty(&swap_active_head))
2513                ret = false;
2514        spin_unlock(&swap_lock);
2515        return ret;
2516}
2517
2518SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2519{
2520        struct swap_info_struct *p = NULL;
2521        unsigned char *swap_map;
2522        struct swap_cluster_info *cluster_info;
2523        unsigned long *frontswap_map;
2524        struct file *swap_file, *victim;
2525        struct address_space *mapping;
2526        struct inode *inode;
2527        struct filename *pathname;
2528        int err, found = 0;
2529        unsigned int old_block_size;
2530
2531        if (!capable(CAP_SYS_ADMIN))
2532                return -EPERM;
2533
2534        BUG_ON(!current->mm);
2535
2536        pathname = getname(specialfile);
2537        if (IS_ERR(pathname))
2538                return PTR_ERR(pathname);
2539
2540        victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2541        err = PTR_ERR(victim);
2542        if (IS_ERR(victim))
2543                goto out;
2544
2545        mapping = victim->f_mapping;
2546        spin_lock(&swap_lock);
2547        plist_for_each_entry(p, &swap_active_head, list) {
2548                if (p->flags & SWP_WRITEOK) {
2549                        if (p->swap_file->f_mapping == mapping) {
2550                                found = 1;
2551                                break;
2552                        }
2553                }
2554        }
2555        if (!found) {
2556                err = -EINVAL;
2557                spin_unlock(&swap_lock);
2558                goto out_dput;
2559        }
2560        if (!security_vm_enough_memory_mm(current->mm, p->pages))
2561                vm_unacct_memory(p->pages);
2562        else {
2563                err = -ENOMEM;
2564                spin_unlock(&swap_lock);
2565                goto out_dput;
2566        }
2567        del_from_avail_list(p);
2568        spin_lock(&p->lock);
2569        if (p->prio < 0) {
2570                struct swap_info_struct *si = p;
2571                int nid;
2572
2573                plist_for_each_entry_continue(si, &swap_active_head, list) {
2574                        si->prio++;
2575                        si->list.prio--;
2576                        for_each_node(nid) {
2577                                if (si->avail_lists[nid].prio != 1)
2578                                        si->avail_lists[nid].prio--;
2579                        }
2580                }
2581                least_priority++;
2582        }
2583        plist_del(&p->list, &swap_active_head);
2584        atomic_long_sub(p->pages, &nr_swap_pages);
2585        total_swap_pages -= p->pages;
2586        p->flags &= ~SWP_WRITEOK;
2587        spin_unlock(&p->lock);
2588        spin_unlock(&swap_lock);
2589
2590        disable_swap_slots_cache_lock();
2591
2592        set_current_oom_origin();
2593        err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2594        clear_current_oom_origin();
2595
2596        if (err) {
2597                /* re-insert swap space back into swap_list */
2598                reinsert_swap_info(p);
2599                reenable_swap_slots_cache_unlock();
2600                goto out_dput;
2601        }
2602
2603        reenable_swap_slots_cache_unlock();
2604
2605        /*
2606         * Wait for swap operations protected by get/put_swap_device()
2607         * to complete.
2608         *
2609         * We need synchronize_rcu() here to protect access to
2610         * the swap cache data structure.
2611         */
2612        percpu_ref_kill(&p->users);
2613        synchronize_rcu();
2614        wait_for_completion(&p->comp);
2615
2616        flush_work(&p->discard_work);
2617
2618        destroy_swap_extents(p);
2619        if (p->flags & SWP_CONTINUED)
2620                free_swap_count_continuations(p);
2621
2622        if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2623                atomic_dec(&nr_rotate_swap);
2624
2625        mutex_lock(&swapon_mutex);
2626        spin_lock(&swap_lock);
2627        spin_lock(&p->lock);
2628        drain_mmlist();
2629
2630        /* wait for anyone still in scan_swap_map_slots */
2631        p->highest_bit = 0;             /* cuts scans short */
2632        while (p->flags >= SWP_SCANNING) {
2633                spin_unlock(&p->lock);
2634                spin_unlock(&swap_lock);
2635                schedule_timeout_uninterruptible(1);
2636                spin_lock(&swap_lock);
2637                spin_lock(&p->lock);
2638        }
2639
2640        swap_file = p->swap_file;
2641        old_block_size = p->old_block_size;
2642        p->swap_file = NULL;
2643        p->max = 0;
2644        swap_map = p->swap_map;
2645        p->swap_map = NULL;
2646        cluster_info = p->cluster_info;
2647        p->cluster_info = NULL;
2648        frontswap_map = frontswap_map_get(p);
2649        spin_unlock(&p->lock);
2650        spin_unlock(&swap_lock);
2651        arch_swap_invalidate_area(p->type);
2652        frontswap_invalidate_area(p->type);
2653        frontswap_map_set(p, NULL);
2654        mutex_unlock(&swapon_mutex);
2655        free_percpu(p->percpu_cluster);
2656        p->percpu_cluster = NULL;
2657        free_percpu(p->cluster_next_cpu);
2658        p->cluster_next_cpu = NULL;
2659        vfree(swap_map);
2660        kvfree(cluster_info);
2661        kvfree(frontswap_map);
2662        /* Destroy swap account information */
2663        swap_cgroup_swapoff(p->type);
2664        exit_swap_address_space(p->type);
2665
2666        inode = mapping->host;
2667        if (S_ISBLK(inode->i_mode)) {
2668                struct block_device *bdev = I_BDEV(inode);
2669
2670                set_blocksize(bdev, old_block_size);
2671                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2672        }
2673
2674        inode_lock(inode);
2675        inode->i_flags &= ~S_SWAPFILE;
2676        inode_unlock(inode);
2677        filp_close(swap_file, NULL);
2678
2679        /*
2680         * Clear the SWP_USED flag after all resources are freed so that swapon
2681         * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2682         * not hold p->lock after we cleared its SWP_WRITEOK.
2683         */
2684        spin_lock(&swap_lock);
2685        p->flags = 0;
2686        spin_unlock(&swap_lock);
2687
2688        err = 0;
2689        atomic_inc(&proc_poll_event);
2690        wake_up_interruptible(&proc_poll_wait);
2691
2692out_dput:
2693        filp_close(victim, NULL);
2694out:
2695        putname(pathname);
2696        return err;
2697}
2698
2699#ifdef CONFIG_PROC_FS
2700static __poll_t swaps_poll(struct file *file, poll_table *wait)
2701{
2702        struct seq_file *seq = file->private_data;
2703
2704        poll_wait(file, &proc_poll_wait, wait);
2705
2706        if (seq->poll_event != atomic_read(&proc_poll_event)) {
2707                seq->poll_event = atomic_read(&proc_poll_event);
2708                return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2709        }
2710
2711        return EPOLLIN | EPOLLRDNORM;
2712}
2713
2714/* iterator */
2715static void *swap_start(struct seq_file *swap, loff_t *pos)
2716{
2717        struct swap_info_struct *si;
2718        int type;
2719        loff_t l = *pos;
2720
2721        mutex_lock(&swapon_mutex);
2722
2723        if (!l)
2724                return SEQ_START_TOKEN;
2725
2726        for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2727                if (!(si->flags & SWP_USED) || !si->swap_map)
2728                        continue;
2729                if (!--l)
2730                        return si;
2731        }
2732
2733        return NULL;
2734}
2735
2736static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2737{
2738        struct swap_info_struct *si = v;
2739        int type;
2740
2741        if (v == SEQ_START_TOKEN)
2742                type = 0;
2743        else
2744                type = si->type + 1;
2745
2746        ++(*pos);
2747        for (; (si = swap_type_to_swap_info(type)); type++) {
2748                if (!(si->flags & SWP_USED) || !si->swap_map)
2749                        continue;
2750                return si;
2751        }
2752
2753        return NULL;
2754}
2755
2756static void swap_stop(struct seq_file *swap, void *v)
2757{
2758        mutex_unlock(&swapon_mutex);
2759}
2760
2761static int swap_show(struct seq_file *swap, void *v)
2762{
2763        struct swap_info_struct *si = v;
2764        struct file *file;
2765        int len;
2766        unsigned int bytes, inuse;
2767
2768        if (si == SEQ_START_TOKEN) {
2769                seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2770                return 0;
2771        }
2772
2773        bytes = si->pages << (PAGE_SHIFT - 10);
2774        inuse = si->inuse_pages << (PAGE_SHIFT - 10);
2775
2776        file = si->swap_file;
2777        len = seq_file_path(swap, file, " \t\n\\");
2778        seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n",
2779                        len < 40 ? 40 - len : 1, " ",
2780                        S_ISBLK(file_inode(file)->i_mode) ?
2781                                "partition" : "file\t",
2782                        bytes, bytes < 10000000 ? "\t" : "",
2783                        inuse, inuse < 10000000 ? "\t" : "",
2784                        si->prio);
2785        return 0;
2786}
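
/*
 * The resulting /proc/swaps output looks roughly like this (illustrative
 * values only; Size and Used are in KiB):
 *
 *	Filename                                Type            Size            Used            Priority
 *	/dev/sda2                               partition       8388604         0               -2
 *	/swapfile                               file            2097148         1024            -3
 */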
2787
2788static const struct seq_operations swaps_op = {
2789        .start =        swap_start,
2790        .next =         swap_next,
2791        .stop =         swap_stop,
2792        .show =         swap_show
2793};
2794
2795static int swaps_open(struct inode *inode, struct file *file)
2796{
2797        struct seq_file *seq;
2798        int ret;
2799
2800        ret = seq_open(file, &swaps_op);
2801        if (ret)
2802                return ret;
2803
2804        seq = file->private_data;
2805        seq->poll_event = atomic_read(&proc_poll_event);
2806        return 0;
2807}
2808
2809static const struct proc_ops swaps_proc_ops = {
2810        .proc_flags     = PROC_ENTRY_PERMANENT,
2811        .proc_open      = swaps_open,
2812        .proc_read      = seq_read,
2813        .proc_lseek     = seq_lseek,
2814        .proc_release   = seq_release,
2815        .proc_poll      = swaps_poll,
2816};
2817
2818static int __init procswaps_init(void)
2819{
2820        proc_create("swaps", 0, NULL, &swaps_proc_ops);
2821        return 0;
2822}
2823__initcall(procswaps_init);
2824#endif /* CONFIG_PROC_FS */
2825
2826#ifdef MAX_SWAPFILES_CHECK
2827static int __init max_swapfiles_check(void)
2828{
2829        MAX_SWAPFILES_CHECK();
2830        return 0;
2831}
2832late_initcall(max_swapfiles_check);
2833#endif
2834
2835static struct swap_info_struct *alloc_swap_info(void)
2836{
2837        struct swap_info_struct *p;
2838        struct swap_info_struct *defer = NULL;
2839        unsigned int type;
2840        int i;
2841
2842        p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2843        if (!p)
2844                return ERR_PTR(-ENOMEM);
2845
2846        if (percpu_ref_init(&p->users, swap_users_ref_free,
2847                            PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2848                kvfree(p);
2849                return ERR_PTR(-ENOMEM);
2850        }
2851
2852        spin_lock(&swap_lock);
2853        for (type = 0; type < nr_swapfiles; type++) {
2854                if (!(swap_info[type]->flags & SWP_USED))
2855                        break;
2856        }
2857        if (type >= MAX_SWAPFILES) {
2858                spin_unlock(&swap_lock);
2859                percpu_ref_exit(&p->users);
2860                kvfree(p);
2861                return ERR_PTR(-EPERM);
2862        }
2863        if (type >= nr_swapfiles) {
2864                p->type = type;
2865                /*
2866                 * Publish the swap_info_struct after initializing it.
2867                 * Note that kvzalloc() above zeroes all its fields.
2868                 */
2869                smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2870                nr_swapfiles++;
2871        } else {
2872                defer = p;
2873                p = swap_info[type];
2874                /*
2875                 * Do not memset this entry: a racing procfs swap_next()
2876                 * would be relying on p->type to remain valid.
2877                 */
2878        }
2879        p->swap_extent_root = RB_ROOT;
2880        plist_node_init(&p->list, 0);
2881        for_each_node(i)
2882                plist_node_init(&p->avail_lists[i], 0);
2883        p->flags = SWP_USED;
2884        spin_unlock(&swap_lock);
2885        if (defer) {
2886                percpu_ref_exit(&defer->users);
2887                kvfree(defer);
2888        }
2889        spin_lock_init(&p->lock);
2890        spin_lock_init(&p->cont_lock);
2891        init_completion(&p->comp);
2892
2893        return p;
2894}
2895
2896static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2897{
2898        int error;
2899
2900        if (S_ISBLK(inode->i_mode)) {
2901                p->bdev = blkdev_get_by_dev(inode->i_rdev,
2902                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2903                if (IS_ERR(p->bdev)) {
2904                        error = PTR_ERR(p->bdev);
2905                        p->bdev = NULL;
2906                        return error;
2907                }
2908                p->old_block_size = block_size(p->bdev);
2909                error = set_blocksize(p->bdev, PAGE_SIZE);
2910                if (error < 0)
2911                        return error;
2912                /*
2913                 * Zoned block devices contain zones that have a sequential
2914                 * write only restriction.  Hence zoned block devices are not
2915                 * suitable for swapping.  Disallow them here.
2916                 */
2917                if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
2918                        return -EINVAL;
2919                p->flags |= SWP_BLKDEV;
2920        } else if (S_ISREG(inode->i_mode)) {
2921                p->bdev = inode->i_sb->s_bdev;
2922        }
2923
2924        return 0;
2925}
2926
2927
2928/*
2929 * Find out how many pages are allowed for a single swap device. There
2930 * are two limiting factors:
2931 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2932 * 2) the number of bits in the swap pte, as defined by the different
2933 * architectures.
2934 *
2935 * In order to find the largest possible bit mask, a swap entry with
2936 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2937 * decoded to a swp_entry_t again, and finally the swap offset is
2938 * extracted.
2939 *
2940 * This will mask all the bits from the initial ~0UL mask that can't
2941 * be encoded in either the swp_entry_t or the architecture definition
2942 * of a swap pte.
2943 */
2944unsigned long generic_max_swapfile_size(void)
2945{
2946        return swp_offset(pte_to_swp_entry(
2947                        swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2948}
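
/*
 * A worked reading of generic_max_swapfile_size(): swp_entry(0, ~0UL) has
 * every offset bit set; converting it to an arch swap pte and back drops
 * whatever high bits neither the swp_entry_t packing nor the arch pte format
 * can hold, so swp_offset() of the round-trip result is the largest
 * representable offset, and the final + 1 turns that into a page count.
 */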
2949
2950/* Can be overridden by an architecture for additional checks. */
2951__weak unsigned long max_swapfile_size(void)
2952{
2953        return generic_max_swapfile_size();
2954}
2955
2956static unsigned long read_swap_header(struct swap_info_struct *p,
2957                                        union swap_header *swap_header,
2958                                        struct inode *inode)
2959{
2960        int i;
2961        unsigned long maxpages;
2962        unsigned long swapfilepages;
2963        unsigned long last_page;
2964
2965        if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2966                pr_err("Unable to find swap-space signature\n");
2967                return 0;
2968        }
2969
2970        /* swap partition endianness hack... */
2971        if (swab32(swap_header->info.version) == 1) {
2972                swab32s(&swap_header->info.version);
2973                swab32s(&swap_header->info.last_page);
2974                swab32s(&swap_header->info.nr_badpages);
2975                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2976                        return 0;
2977                for (i = 0; i < swap_header->info.nr_badpages; i++)
2978                        swab32s(&swap_header->info.badpages[i]);
2979        }
2980        /* Check the swap header's sub-version */
2981        if (swap_header->info.version != 1) {
2982                pr_warn("Unable to handle swap header version %d\n",
2983                        swap_header->info.version);
2984                return 0;
2985        }
2986
2987        p->lowest_bit  = 1;
2988        p->cluster_next = 1;
2989        p->cluster_nr = 0;
2990
2991        maxpages = max_swapfile_size();
2992        last_page = swap_header->info.last_page;
2993        if (!last_page) {
2994                pr_warn("Empty swap-file\n");
2995                return 0;
2996        }
2997        if (last_page > maxpages) {
2998                pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2999                        maxpages << (PAGE_SHIFT - 10),
3000                        last_page << (PAGE_SHIFT - 10));
3001        }
3002        if (maxpages > last_page) {
3003                maxpages = last_page + 1;
3004                /* p->max is an unsigned int: don't overflow it */
3005                if ((unsigned int)maxpages == 0)
3006                        maxpages = UINT_MAX;
3007        }
3008        p->highest_bit = maxpages - 1;
3009
3010        if (!maxpages)
3011                return 0;
3012        swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3013        if (swapfilepages && maxpages > swapfilepages) {
3014                pr_warn("Swap area shorter than signature indicates\n");
3015                return 0;
3016        }
3017        if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3018                return 0;
3019        if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3020                return 0;
3021
3022        return maxpages;
3023}
3024
3025#define SWAP_CLUSTER_INFO_COLS                                          \
3026        DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3027#define SWAP_CLUSTER_SPACE_COLS                                         \
3028        DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3029#define SWAP_CLUSTER_COLS                                               \
3030        max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3031
3032static int setup_swap_map_and_extents(struct swap_info_struct *p,
3033                                        union swap_header *swap_header,
3034                                        unsigned char *swap_map,
3035                                        struct swap_cluster_info *cluster_info,
3036                                        unsigned long maxpages,
3037                                        sector_t *span)
3038{
3039        unsigned int j, k;
3040        unsigned int nr_good_pages;
3041        int nr_extents;
3042        unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3043        unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3044        unsigned long i, idx;
3045
3046        nr_good_pages = maxpages - 1;   /* omit header page */
3047
3048        cluster_list_init(&p->free_clusters);
3049        cluster_list_init(&p->discard_clusters);
3050
3051        for (i = 0; i < swap_header->info.nr_badpages; i++) {
3052                unsigned int page_nr = swap_header->info.badpages[i];
3053                if (page_nr == 0 || page_nr > swap_header->info.last_page)
3054                        return -EINVAL;
3055                if (page_nr < maxpages) {
3056                        swap_map[page_nr] = SWAP_MAP_BAD;
3057                        nr_good_pages--;
3058                        /*
3059                         * Haven't marked the cluster free yet, no list
3060                         * operation involved
3061                         */
3062                        inc_cluster_info_page(p, cluster_info, page_nr);
3063                }
3064        }
3065
3066        /* Haven't marked the cluster free yet, no list operation involved */
3067        for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3068                inc_cluster_info_page(p, cluster_info, i);
3069
3070        if (nr_good_pages) {
3071                swap_map[0] = SWAP_MAP_BAD;
3072                /*
3073                 * Haven't marked the cluster free yet, no list
3074                 * operation involved
3075                 */
3076                inc_cluster_info_page(p, cluster_info, 0);
3077                p->max = maxpages;
3078                p->pages = nr_good_pages;
3079                nr_extents = setup_swap_extents(p, span);
3080                if (nr_extents < 0)
3081                        return nr_extents;
3082                nr_good_pages = p->pages;
3083        }
3084        if (!nr_good_pages) {
3085                pr_warn("Empty swap-file\n");
3086                return -EINVAL;
3087        }
3088
3089        if (!cluster_info)
3090                return nr_extents;
3091
3092
3093        /*
3094         * Queue free clusters in column-interleaved order to cut false cache
3095         * line sharing of cluster_info and sharing of a swap address space.
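             *
             * For example, with nr_clusters = 10, SWAP_CLUSTER_COLS = 4 and
             * col = 0, free clusters are queued in the order 0, 4, 8, 1, 5,
             * 9, 2, 6, 3, 7 rather than sequentially.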
3096         */
3097        for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3098                j = (k + col) % SWAP_CLUSTER_COLS;
3099                for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3100                        idx = i * SWAP_CLUSTER_COLS + j;
3101                        if (idx >= nr_clusters)
3102                                continue;
3103                        if (cluster_count(&cluster_info[idx]))
3104                                continue;
3105                        cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3106                        cluster_list_add_tail(&p->free_clusters, cluster_info,
3107                                              idx);
3108                }
3109        }
3110        return nr_extents;
3111}
3112
3113/*
3114 * Helper for sys_swapon to determine whether a given swap
3115 * backing device's queue supports DISCARD operations.
3116 */
3117static bool swap_discardable(struct swap_info_struct *si)
3118{
3119        struct request_queue *q = bdev_get_queue(si->bdev);
3120
3121        if (!q || !blk_queue_discard(q))
3122                return false;
3123
3124        return true;
3125}
3126
3127SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3128{
3129        struct swap_info_struct *p;
3130        struct filename *name;
3131        struct file *swap_file = NULL;
3132        struct address_space *mapping;
3133        int prio;
3134        int error;
3135        union swap_header *swap_header;
3136        int nr_extents;
3137        sector_t span;
3138        unsigned long maxpages;
3139        unsigned char *swap_map = NULL;
3140        struct swap_cluster_info *cluster_info = NULL;
3141        unsigned long *frontswap_map = NULL;
3142        struct page *page = NULL;
3143        struct inode *inode = NULL;
3144        bool inced_nr_rotate_swap = false;
3145
3146        if (swap_flags & ~SWAP_FLAGS_VALID)
3147                return -EINVAL;
3148
3149        if (!capable(CAP_SYS_ADMIN))
3150                return -EPERM;
3151
3152        if (!swap_avail_heads)
3153                return -ENOMEM;
3154
3155        p = alloc_swap_info();
3156        if (IS_ERR(p))
3157                return PTR_ERR(p);
3158
3159        INIT_WORK(&p->discard_work, swap_discard_work);
3160
3161        name = getname(specialfile);
3162        if (IS_ERR(name)) {
3163                error = PTR_ERR(name);
3164                name = NULL;
3165                goto bad_swap;
3166        }
3167        swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3168        if (IS_ERR(swap_file)) {
3169                error = PTR_ERR(swap_file);
3170                swap_file = NULL;
3171                goto bad_swap;
3172        }
3173
3174        p->swap_file = swap_file;
3175        mapping = swap_file->f_mapping;
3176        inode = mapping->host;
3177
3178        error = claim_swapfile(p, inode);
3179        if (unlikely(error))
3180                goto bad_swap;
3181
3182        inode_lock(inode);
3183        if (IS_SWAPFILE(inode)) {
3184                error = -EBUSY;
3185                goto bad_swap_unlock_inode;
3186        }
3187
3188        /*
3189         * Read the swap header.
3190         */
3191        if (!mapping->a_ops->readpage) {
3192                error = -EINVAL;
3193                goto bad_swap_unlock_inode;
3194        }
3195        page = read_mapping_page(mapping, 0, swap_file);
3196        if (IS_ERR(page)) {
3197                error = PTR_ERR(page);
3198                goto bad_swap_unlock_inode;
3199        }
3200        swap_header = kmap(page);
3201
3202        maxpages = read_swap_header(p, swap_header, inode);
3203        if (unlikely(!maxpages)) {
3204                error = -EINVAL;
3205                goto bad_swap_unlock_inode;
3206        }
3207
3208        /* OK, set up the swap map and apply the bad block list */
3209        swap_map = vzalloc(maxpages);
3210        if (!swap_map) {
3211                error = -ENOMEM;
3212                goto bad_swap_unlock_inode;
3213        }
3214
3215        if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
3216                p->flags |= SWP_STABLE_WRITES;
3217
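            /*
             * Backings that implement ->rw_page (zram and brd, for example)
             * can complete page-sized I/O synchronously, which lets
             * do_swap_page() skip the swap cache for singly-referenced
             * entries.
             */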
3218        if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3219                p->flags |= SWP_SYNCHRONOUS_IO;
3220
3221        if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3222                int cpu;
3223                unsigned long ci, nr_cluster;
3224
3225                p->flags |= SWP_SOLIDSTATE;
3226                p->cluster_next_cpu = alloc_percpu(unsigned int);
3227                if (!p->cluster_next_cpu) {
3228                        error = -ENOMEM;
3229                        goto bad_swap_unlock_inode;
3230                }
3231                /*
3232                 * select a random position to start with to help the
3233                 * SSD's wear leveling
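                     * (1 + prandom_u32_max(p->highest_bit) picks a start in
                     * [1, p->highest_bit], skipping the header at slot 0)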
3234                 */
3235                for_each_possible_cpu(cpu) {
3236                        per_cpu(*p->cluster_next_cpu, cpu) =
3237                                1 + prandom_u32_max(p->highest_bit);
3238                }
3239                nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3240
3241                cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3242                                        GFP_KERNEL);
3243                if (!cluster_info) {
3244                        error = -ENOMEM;
3245                        goto bad_swap_unlock_inode;
3246                }
3247
3248                for (ci = 0; ci < nr_cluster; ci++)
3249                        spin_lock_init(&((cluster_info + ci)->lock));
3250
3251                p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3252                if (!p->percpu_cluster) {
3253                        error = -ENOMEM;
3254                        goto bad_swap_unlock_inode;
3255                }
3256                for_each_possible_cpu(cpu) {
3257                        struct percpu_cluster *cluster;
3258                        cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3259                        cluster_set_null(&cluster->index);
3260                }
3261        } else {
3262                atomic_inc(&nr_rotate_swap);
3263                inced_nr_rotate_swap = true;
3264        }
3265
3266        error = swap_cgroup_swapon(p->type, maxpages);
3267        if (error)
3268                goto bad_swap_unlock_inode;
3269
3270        nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3271                cluster_info, maxpages, &span);
3272        if (unlikely(nr_extents < 0)) {
3273                error = nr_extents;
3274                goto bad_swap_unlock_inode;
3275        }
3276        /* frontswap enabled? set up bit-per-page map for frontswap */
3277        if (IS_ENABLED(CONFIG_FRONTSWAP))
3278                frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3279                                         sizeof(long),
3280                                         GFP_KERNEL);
3281
3282        if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3283                /*
3284                 * When discard is enabled for swap with no particular
3285                 * policy flagged, we set all swap discard flags here in
3286                 * order to sustain backward compatibility with older
3287                 * swapon(8) releases.
3288                 */
3289                p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3290                             SWP_PAGE_DISCARD);
3291
3292                /*
3293                 * Via the sys_swapon flags, a sysadmin can tell us to
3294                 * either do single-time area discards only, or to just
3295                 * perform discards for released swap page-clusters.
3296                 * Now it's time to adjust the p->flags accordingly.
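                     *
                     * For example, SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES
                     * clears SWP_AREA_DISCARD below, so no swapon-time discard
                     * is issued and only freed page-clusters are discarded.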
3297                 */
3298                if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3299                        p->flags &= ~SWP_PAGE_DISCARD;
3300                else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3301                        p->flags &= ~SWP_AREA_DISCARD;
3302
3303                /* issue a swapon-time discard if it's still required */
3304                if (p->flags & SWP_AREA_DISCARD) {
3305                        int err = discard_swap(p);
3306                        if (unlikely(err))
3307                                pr_err("swapon: discard_swap(%p): %d\n",
3308                                        p, err);
3309                }
3310        }
3311
3312        error = init_swap_address_space(p->type, maxpages);
3313        if (error)
3314                goto bad_swap_unlock_inode;
3315
3316        /*
3317         * Flush any pending IO and dirty mappings before we start using this
3318         * swap device.
3319         */
3320        inode->i_flags |= S_SWAPFILE;
3321        error = inode_drain_writes(inode);
3322        if (error) {
3323                inode->i_flags &= ~S_SWAPFILE;
3324                goto free_swap_address_space;
3325        }
3326
3327        mutex_lock(&swapon_mutex);
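            /*
             * SWAP_FLAG_PREFER carries the requested priority in the low bits
             * of swap_flags, e.g. SWAP_FLAG_PREFER | 5 requests priority 5;
             * without it, enable_swap_info() assigns a successively lower
             * negative priority.
             */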
3328        prio = -1;
3329        if (swap_flags & SWAP_FLAG_PREFER)
3330                prio =
3331                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3332        enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3333
3334        pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3335                p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3336                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3337                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3338                (p->flags & SWP_DISCARDABLE) ? "D" : "",
3339                (p->flags & SWP_AREA_DISCARD) ? "s" : "",
3340                (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3341                (frontswap_map) ? "FS" : "");
3342
3343        mutex_unlock(&swapon_mutex);
3344        atomic_inc(&proc_poll_event);
3345        wake_up_interruptible(&proc_poll_wait);
3346
3347        error = 0;
3348        goto out;
3349free_swap_address_space:
3350        exit_swap_address_space(p->type);
3351bad_swap_unlock_inode:
3352        inode_unlock(inode);
3353bad_swap:
3354        free_percpu(p->percpu_cluster);
3355        p->percpu_cluster = NULL;
3356        free_percpu(p->cluster_next_cpu);
3357        p->cluster_next_cpu = NULL;
3358        if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3359                set_blocksize(p->bdev, p->old_block_size);
3360                blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3361        }
3362        inode = NULL;
3363        destroy_swap_extents(p);
3364        swap_cgroup_swapoff(p->type);
3365        spin_lock(&swap_lock);
3366        p->swap_file = NULL;
3367        p->flags = 0;
3368        spin_unlock(&swap_lock);
3369        vfree(swap_map);
3370        kvfree(cluster_info);
3371        kvfree(frontswap_map);
3372        if (inced_nr_rotate_swap)
3373                atomic_dec(&nr_rotate_swap);
3374        if (swap_file)
3375                filp_close(swap_file, NULL);
3376out:
3377        if (page && !IS_ERR(page)) {
3378                kunmap(page);
3379                put_page(page);
3380        }
3381        if (name)
3382                putname(name);
3383        if (inode)
3384                inode_unlock(inode);
3385        if (!error)
3386                enable_swap_slots_cache();
3387        return error;
3388}
3389
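    /*
     * Report swap usage, e.g. for /proc/meminfo and sysinfo(2).  Pages still
     * in use on a device that is mid-swapoff (SWP_USED but not SWP_WRITEOK)
     * are added back to both counters, since swapoff has already subtracted
     * the whole device from nr_swap_pages and total_swap_pages.
     */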
3390void si_swapinfo(struct sysinfo *val)
3391{
3392        unsigned int type;
3393        unsigned long nr_to_be_unused = 0;
3394
3395        spin_lock(&swap_lock);
3396        for (type = 0; type < nr_swapfiles; type++) {
3397                struct swap_info_struct *si = swap_info[type];
3398
3399                if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3400                        nr_to_be_unused += si->inuse_pages;
3401        }
3402        val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3403        val->totalswap = total_swap_pages + nr_to_be_unused;
3404        spin_unlock(&swap_lock);
3405}
3406
3407/*
3408 * Verify that a swap entry is valid and increment its swap map count.
3409 *
3410 * Return values:
3411 * - success -> 0
3412 * - swp_entry is invalid -> EINVAL
3413 * - swp_entry is a migration entry -> EINVAL
3414 * - a swap-cache reference is requested but there is already one -> EEXIST
3415 * - a swap-cache reference is requested but the entry is not used -> ENOENT
3416 * - a swap-mapped reference is requested but needs a continued swap count -> ENOMEM
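     *
     * The swap_map byte packs the map count (up to SWAP_MAP_MAX) together
     * with the SWAP_HAS_CACHE and COUNT_CONTINUED flags; e.g. a value of
     * 0x43 (SWAP_HAS_CACHE | 3) means three swap-mapped references plus a
     * swap-cache reference.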
3417 */
3418static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3419{
3420        struct swap_info_struct *p;
3421        struct swap_cluster_info *ci;
3422        unsigned long offset;
3423        unsigned char count;
3424        unsigned char has_cache;
3425        int err;
3426
3427        p = get_swap_device(entry);
3428        if (!p)
3429                return -EINVAL;
3430
3431        offset = swp_offset(entry);
3432        ci = lock_cluster_or_swap_info(p, offset);
3433
3434        count = p->swap_map[offset];
3435
3436        /*
3437         * swapin_readahead() doesn't check if a swap entry is valid, so the
3438         * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3439         */
3440        if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3441                err = -ENOENT;
3442                goto unlock_out;
3443        }
3444
3445        has_cache = count & SWAP_HAS_CACHE;
3446        count &= ~SWAP_HAS_CACHE;
3447        err = 0;
3448
3449        if (usage == SWAP_HAS_CACHE) {
3450
3451                /* set SWAP_HAS_CACHE if there is no cache and entry is used */
3452                if (!has_cache && count)
3453                        has_cache = SWAP_HAS_CACHE;
3454                else if (has_cache)             /* someone else added cache */
3455                        err = -EEXIST;
3456                else                            /* no users remaining */
3457                        err = -ENOENT;
3458
3459        } else if (count || has_cache) {
3460
3461                if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3462                        count += usage;
3463                else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3464                        err = -EINVAL;
3465                else if (swap_count_continued(p, offset, count))
3466                        count = COUNT_CONTINUED;
3467                else
3468                        err = -ENOMEM;
3469        } else
3470                err = -ENOENT;                  /* unused swap entry */
3471
3472        WRITE_ONCE(p->swap_map[offset], count | has_cache);
3473
3474unlock_out:
3475        unlock_cluster_or_swap_info(p, ci);
3476        if (p)
3477                put_swap_device(p);
3478        return err;
3479}
3480
3481/*
3482 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3483 * (in which case its reference count is never incremented).
3484 */
3485void swap_shmem_alloc(swp_entry_t entry)
3486{
3487        __swap_duplicate(entry, SWAP_MAP_SHMEM);
3488}
3489
3490/*
3491 * Increase reference count of swap entry by 1.
3492 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3493 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3494 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3495 * might occur if a page table entry has got corrupted.
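     *
     * Callers that hold a page table lock recover from -ENOMEM roughly like
     * this (illustrative sketch, not copied from any particular caller):
     *
     *      if (swap_duplicate(entry) == -ENOMEM) {
     *              (drop the page table lock)
     *              if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
     *                      return -ENOMEM;
     *              (re-take the page table lock and retry)
     *      }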
3496 */
3497int swap_duplicate(swp_entry_t entry)
3498{
3499        int err = 0;
3500
3501        while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3502                err = add_swap_count_continuation(entry, GFP_ATOMIC);
3503        return err;
3504}
3505
3506/*
3507 * @entry: swap entry for which we allocate swap cache.
3508 *
3509 * Called when allocating swap cache for an existing swap entry.
3510 * Returns 0 on success, or an error code otherwise:
3511 * -EEXIST means there is already a swap cache for the entry.
3512 * Note: return code is different from swap_duplicate().
3513 */
3514int swapcache_prepare(swp_entry_t entry)
3515{
3516        return __swap_duplicate(entry, SWAP_HAS_CACHE);
3517}
3518
3519struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3520{
3521        return swap_type_to_swap_info(swp_type(entry));
3522}
3523
3524struct swap_info_struct *page_swap_info(struct page *page)
3525{
3526        swp_entry_t entry = { .val = page_private(page) };
3527        return swp_swap_info(entry);
3528}
3529
3530/*
3531 * out-of-line __page_file_ methods to avoid include hell.
3532 */
3533struct address_space *__page_file_mapping(struct page *page)
3534{
3535        return page_swap_info(page)->swap_file->f_mapping;
3536}
3537EXPORT_SYMBOL_GPL(__page_file_mapping);
3538
3539pgoff_t __page_file_index(struct page *page)
3540{
3541        swp_entry_t swap = { .val = page_private(page) };
3542        return swp_offset(swap);
3543}
3544EXPORT_SYMBOL_GPL(__page_file_index);
3545
3546/*
3547 * add_swap_count_continuation - called when a swap count is duplicated
3548 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3549 * page of the original vmalloc'ed swap_map, to hold the continuation count
3550 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3551 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3552 *
3553 * These continuation pages are seldom referenced: the common paths all work
3554 * on the original swap_map, only referring to a continuation page when the
3555 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
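     *
     * For example (ignoring SWAP_HAS_CACHE), an entry duplicated into 100 page
     * tables ends up with swap_map[offset] = COUNT_CONTINUED | 37 and a count
     * of 1 in the first continuation page, each continuation unit being worth
     * SWAP_MAP_MAX + 1 = 63 references.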
3556 *
3557 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3558 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3559 * can be called after dropping locks.
3560 */
3561int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3562{
3563        struct swap_info_struct *si;
3564        struct swap_cluster_info *ci;
3565        struct page *head;
3566        struct page *page;
3567        struct page *list_page;
3568        pgoff_t offset;
3569        unsigned char count;
3570        int ret = 0;
3571
3572        /*
3573         * When debugging, it's easier to use __GFP_ZERO here; but it's better
3574         * for latency not to zero a page while GFP_ATOMIC and holding locks.
3575         */
3576        page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3577
3578        si = get_swap_device(entry);
3579        if (!si) {
3580                /*
3581                 * An acceptable race has occurred since the failing
3582                 * __swap_duplicate(): the swap device may have been swapped off
3583                 */
3584                goto outer;
3585        }
3586        spin_lock(&si->lock);
3587
3588        offset = swp_offset(entry);
3589
3590        ci = lock_cluster(si, offset);
3591
3592        count = swap_count(si->swap_map[offset]);
3593
3594        if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3595                /*
3596                 * The higher the swap count, the more likely it is that tasks
3597                 * will race to add swap count continuation: we need to avoid
3598                 * over-provisioning.
3599                 */
3600                goto out;
3601        }
3602
3603        if (!page) {
3604                ret = -ENOMEM;
3605                goto out;
3606        }
3607
3608        /*
3609         * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3610         * no architecture is using highmem pages for kernel page tables: so it
3611         * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3612         */
3613        head = vmalloc_to_page(si->swap_map + offset);
3614        offset &= ~PAGE_MASK;
3615
3616        spin_lock(&si->cont_lock);
3617        /*
3618         * Page allocation does not initialize the page's lru field,
3619         * but it does always reset its private field.
3620         */
3621        if (!page_private(head)) {
3622                BUG_ON(count & COUNT_CONTINUED);
3623                INIT_LIST_HEAD(&head->lru);
3624                set_page_private(head, SWP_CONTINUED);
3625                si->flags |= SWP_CONTINUED;
3626        }
3627
3628        list_for_each_entry(list_page, &head->lru, lru) {
3629                unsigned char *map;
3630
3631                /*
3632                 * If the previous map said no continuation, but we've found
3633                 * a continuation page, free our allocation and use this one.
3634                 */
3635                if (!(count & COUNT_CONTINUED))
3636                        goto out_unlock_cont;
3637
3638                map = kmap_atomic(list_page) + offset;
3639                count = *map;
3640                kunmap_atomic(map);
3641
3642                /*
3643                 * If this continuation count now has some space in it,
3644                 * free our allocation and use this one.
3645                 */
3646                if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3647                        goto out_unlock_cont;
3648        }
3649
3650        list_add_tail(&page->lru, &head->lru);
3651        page = NULL;                    /* now it's attached, don't free it */
3652out_unlock_cont:
3653        spin_unlock(&si->cont_lock);
3654out:
3655        unlock_cluster(ci);
3656        spin_unlock(&si->lock);
3657        put_swap_device(si);
3658outer:
3659        if (page)
3660                __free_page(page);
3661        return ret;
3662}
3663
3664/*
3665 * swap_count_continued - when the original swap_map count is incremented
3666 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3667 * into, carry if so, or else fail until a new continuation page is allocated;
3668 * when the original swap_map count is decremented from 0 with continuation,
3669 * borrow from the continuation and report whether it still holds more.
3670 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3671 * lock.
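     *
     * The swap_map byte is the lowest "digit" (0..SWAP_MAP_MAX) of the count;
     * each continuation page chained on the head page's lru supplies one
     * higher-order digit (0..SWAP_CONT_MAX) at the same page offset.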
3672 */
3673static bool swap_count_continued(struct swap_info_struct *si,
3674                                 pgoff_t offset, unsigned char count)
3675{
3676        struct page *head;
3677        struct page *page;
3678        unsigned char *map;
3679        bool ret;
3680
3681        head = vmalloc_to_page(si->swap_map + offset);
3682        if (page_private(head) != SWP_CONTINUED) {
3683                BUG_ON(count & COUNT_CONTINUED);
3684                return false;           /* need to add count continuation */
3685        }
3686
3687        spin_lock(&si->cont_lock);
3688        offset &= ~PAGE_MASK;
3689        page = list_next_entry(head, lru);
3690        map = kmap_atomic(page) + offset;
3691
3692        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
3693                goto init_map;          /* jump over SWAP_CONT_MAX checks */
3694
3695        if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3696                /*
3697                 * Think of how you add 1 to 999
3698                 */
3699                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3700                        kunmap_atomic(map);
3701                        page = list_next_entry(page, lru);
3702                        BUG_ON(page == head);
3703                        map = kmap_atomic(page) + offset;
3704                }
3705                if (*map == SWAP_CONT_MAX) {
3706                        kunmap_atomic(map);
3707                        page = list_next_entry(page, lru);
3708                        if (page == head) {
3709                                ret = false;    /* add count continuation */
3710                                goto out;
3711                        }
3712                        map = kmap_atomic(page) + offset;
3713init_map:               *map = 0;               /* we didn't zero the page */
3714                }
3715                *map += 1;
3716                kunmap_atomic(map);
3717                while ((page = list_prev_entry(page, lru)) != head) {
3718                        map = kmap_atomic(page) + offset;
3719                        *map = COUNT_CONTINUED;
3720                        kunmap_atomic(map);
3721                }
3722                ret = true;                     /* incremented */
3723
3724        } else {                                /* decrementing */
3725                /*
3726                 * Think of how you subtract 1 from 1000
3727                 */
3728                BUG_ON(count != COUNT_CONTINUED);
3729                while (*map == COUNT_CONTINUED) {
3730                        kunmap_atomic(map);
3731                        page = list_next_entry(page, lru);
3732                        BUG_ON(page == head);
3733                        map = kmap_atomic(page) + offset;
3734                }
3735                BUG_ON(*map == 0);
3736                *map -= 1;
3737                if (*map == 0)
3738                        count = 0;
3739                kunmap_atomic(map);
3740                while ((page = list_prev_entry(page, lru)) != head) {
3741                        map = kmap_atomic(page) + offset;
3742                        *map = SWAP_CONT_MAX | count;
3743                        count = COUNT_CONTINUED;
3744                        kunmap_atomic(map);
3745                }
3746                ret = count == COUNT_CONTINUED;
3747        }
3748out:
3749        spin_unlock(&si->cont_lock);
3750        return ret;
3751}
3752
3753/*
3754 * free_swap_count_continuations - called by swapoff to free all the continuation
3755 * pages appended to the swap_map, after the swap_map is quiesced, before vfree'ing it.
3756 */
3757static void free_swap_count_continuations(struct swap_info_struct *si)
3758{
3759        pgoff_t offset;
3760
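            /* each page of the swap_map array covers PAGE_SIZE swap entries */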
3761        for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3762                struct page *head;
3763                head = vmalloc_to_page(si->swap_map + offset);
3764                if (page_private(head)) {
3765                        struct page *page, *next;
3766
3767                        list_for_each_entry_safe(page, next, &head->lru, lru) {
3768                                list_del(&page->lru);
3769                                __free_page(page);
3770                        }
3771                }
3772        }
3773}
3774
3775#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3776void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3777{
3778        struct swap_info_struct *si, *next;
3779        int nid = page_to_nid(page);
3780
3781        if (!(gfp_mask & __GFP_IO))
3782                return;
3783
3784        if (!blk_cgroup_congested())
3785                return;
3786
3787        /*
3788         * We've already scheduled a throttle, avoid taking the global swap
3789         * lock.
3790         */
3791        if (current->throttle_queue)
3792                return;
3793
3794        spin_lock(&swap_avail_lock);
3795        plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3796                                  avail_lists[nid]) {
3797                if (si->bdev) {
3798                        blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
3799                        break;
3800                }
3801        }
3802        spin_unlock(&swap_avail_lock);
3803}
3804#endif
3805
3806static int __init swapfile_init(void)
3807{
3808        int nid;
3809
3810        swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3811                                         GFP_KERNEL);
3812        if (!swap_avail_heads) {
3813                pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3814                return -ENOMEM;
3815        }
3816
3817        for_each_node(nid)
3818                plist_head_init(&swap_avail_heads[nid]);
3819
3820        return 0;
3821}
3822subsys_initcall(swapfile_init);
3823