   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/blkdev.h>
  10#include <linux/mm.h>
  11#include <linux/sched/mm.h>
  12#include <linux/sched/task.h>
  13#include <linux/hugetlb.h>
  14#include <linux/mman.h>
  15#include <linux/slab.h>
  16#include <linux/kernel_stat.h>
  17#include <linux/swap.h>
  18#include <linux/vmalloc.h>
  19#include <linux/pagemap.h>
  20#include <linux/namei.h>
  21#include <linux/shmem_fs.h>
  22#include <linux/blk-cgroup.h>
  23#include <linux/random.h>
  24#include <linux/writeback.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/init.h>
  28#include <linux/ksm.h>
  29#include <linux/rmap.h>
  30#include <linux/security.h>
  31#include <linux/backing-dev.h>
  32#include <linux/mutex.h>
  33#include <linux/capability.h>
  34#include <linux/syscalls.h>
  35#include <linux/memcontrol.h>
  36#include <linux/poll.h>
  37#include <linux/oom.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/sort.h>
  41#include <linux/completion.h>
  42#include <linux/suspend.h>
  43#include <linux/zswap.h>
  44#include <linux/plist.h>
  45
  46#include <asm/tlbflush.h>
  47#include <linux/swapops.h>
  48#include <linux/swap_cgroup.h>
  49#include "internal.h"
  50#include "swap.h"
  51
  52static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  53                                 unsigned char);
  54static void free_swap_count_continuations(struct swap_info_struct *);
  55static void swap_entries_free(struct swap_info_struct *si,
  56                              struct swap_cluster_info *ci,
  57                              swp_entry_t entry, unsigned int nr_pages);
  58static void swap_range_alloc(struct swap_info_struct *si,
  59                             unsigned int nr_entries);
  60static bool folio_swapcache_freeable(struct folio *folio);
  61static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
  62                                              unsigned long offset);
  63static inline void unlock_cluster(struct swap_cluster_info *ci);
  64
  65static DEFINE_SPINLOCK(swap_lock);
  66static unsigned int nr_swapfiles;
  67atomic_long_t nr_swap_pages;
  68/*
  69 * Some modules use swappable objects and may try to swap them out under
  70 * memory pressure (via the shrinker). Before doing so, they may wish to
  71 * check to see if any swap space is available.
  72 */
  73EXPORT_SYMBOL_GPL(nr_swap_pages);
  74/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  75long total_swap_pages;
  76static int least_priority = -1;
  77unsigned long swapfile_maximum_size;
  78#ifdef CONFIG_MIGRATION
  79bool swap_migration_ad_supported;
  80#endif  /* CONFIG_MIGRATION */
  81
  82static const char Bad_file[] = "Bad swap file entry ";
  83static const char Unused_file[] = "Unused swap file entry ";
  84static const char Bad_offset[] = "Bad swap offset entry ";
  85static const char Unused_offset[] = "Unused swap offset entry ";
  86
  87/*
  88 * all active swap_info_structs
  89 * protected with swap_lock, and ordered by priority.
  90 */
  91static PLIST_HEAD(swap_active_head);
  92
  93/*
  94 * all available (active, not full) swap_info_structs
  95 * protected with swap_avail_lock, ordered by priority.
  96 * This is used by folio_alloc_swap() instead of swap_active_head
  97 * because swap_active_head includes all swap_info_structs,
  98 * but folio_alloc_swap() doesn't need to look at full ones.
  99 * This uses its own lock instead of swap_lock because when a
 100 * swap_info_struct changes between not-full/full, it needs to
 101 * add/remove itself to/from this list, but the swap_info_struct->lock
 102 * is held and the locking order requires swap_lock to be taken
 103 * before any swap_info_struct->lock.
 104 */
 105static struct plist_head *swap_avail_heads;
 106static DEFINE_SPINLOCK(swap_avail_lock);
 107
 108static struct swap_info_struct *swap_info[MAX_SWAPFILES];
 109
 110static DEFINE_MUTEX(swapon_mutex);
 111
 112static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
 113/* Activity counter to indicate that a swapon or swapoff has occurred */
 114static atomic_t proc_poll_event = ATOMIC_INIT(0);
 115
 116atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 117
 118struct percpu_swap_cluster {
 119        struct swap_info_struct *si[SWAP_NR_ORDERS];
 120        unsigned long offset[SWAP_NR_ORDERS];
 121        local_lock_t lock;
 122};
 123
 124static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = {
 125        .si = { NULL },
 126        .offset = { SWAP_ENTRY_INVALID },
 127        .lock = INIT_LOCAL_LOCK(),
 128};
 129
 130static struct swap_info_struct *swap_type_to_swap_info(int type)
 131{
 132        if (type >= MAX_SWAPFILES)
 133                return NULL;
 134
 135        return READ_ONCE(swap_info[type]); /* rcu_dereference() */
 136}
 137
 138static inline unsigned char swap_count(unsigned char ent)
 139{
 140        return ent & ~SWAP_HAS_CACHE;   /* may include COUNT_CONTINUED flag */
 141}
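/*
 * Illustrative note (editorial sketch, not part of the kernel source;
 * assumes SWAP_HAS_CACHE == 0x40 as defined in include/linux/swap.h):
 * each swap_map byte packs a reference count together with flag bits,
 * e.g.:
 *
 *	unsigned char ent = 0x41;	// SWAP_HAS_CACHE | count of 1
 *
 *	swap_count(ent);		// 1: the count, cache bit stripped
 *	ent & SWAP_HAS_CACHE;		// non-zero: a swap cache page exists
 */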
 142
 143/*
  144 * Use the second highest bit of the inuse_pages counter as an indicator
  145 * of whether a swap device is on the available plist, so the atomic can
  146 * still be updated arithmetically while having special data embedded.
 147 *
 148 * inuse_pages counter is the only thing indicating if a device should
 149 * be on avail_lists or not (except swapon / swapoff). By embedding the
 150 * off-list bit in the atomic counter, updates no longer need any lock
 151 * to check the list status.
 152 *
  153 * This bit will be set if the device is not on the plist and not
  154 * usable, and will be cleared if the device is on the plist.
 155 */
 156#define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
 157#define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
 158static long swap_usage_in_pages(struct swap_info_struct *si)
 159{
 160        return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
 161}
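/*
 * Illustrative sketch (editorial addition, not part of the kernel
 * source): how the off-list bit and the usage counter coexist in one
 * atomic value, assuming a device with si->pages == 1000 that has just
 * become full and been taken off the plist:
 *
 *	long val = atomic_long_read(&si->inuse_pages);
 *
 *	val & SWAP_USAGE_COUNTER_MASK;	// 1000: pages in use
 *	val & SWAP_USAGE_OFFLIST_BIT;	// non-zero: device is off-list
 *
 * Arithmetic add/sub of page counts never carries into the off-list
 * bit, because the usage count can never exceed the number of slots.
 */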
 162
 163/* Reclaim the swap entry anyway if possible */
 164#define TTRS_ANYWAY             0x1
 165/*
 166 * Reclaim the swap entry if there are no more mappings of the
 167 * corresponding page
 168 */
 169#define TTRS_UNMAPPED           0x2
 170/* Reclaim the swap entry if swap is getting full */
 171#define TTRS_FULL               0x4
 172
 173static bool swap_only_has_cache(struct swap_info_struct *si,
 174                              unsigned long offset, int nr_pages)
 175{
 176        unsigned char *map = si->swap_map + offset;
 177        unsigned char *map_end = map + nr_pages;
 178
 179        do {
 180                VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
 181                if (*map != SWAP_HAS_CACHE)
 182                        return false;
 183        } while (++map < map_end);
 184
 185        return true;
 186}
 187
 188static bool swap_is_last_map(struct swap_info_struct *si,
 189                unsigned long offset, int nr_pages, bool *has_cache)
 190{
 191        unsigned char *map = si->swap_map + offset;
 192        unsigned char *map_end = map + nr_pages;
 193        unsigned char count = *map;
 194
 195        if (swap_count(count) != 1 && swap_count(count) != SWAP_MAP_SHMEM)
 196                return false;
 197
 198        while (++map < map_end) {
 199                if (*map != count)
 200                        return false;
 201        }
 202
 203        *has_cache = !!(count & SWAP_HAS_CACHE);
 204        return true;
 205}
 206
 207/*
  208 * Returns the number of pages in the folio backing the swap entry:
  209 * positive if the folio was reclaimed, negative if it was not, and 0 if
  210 * no folio was associated with the swap entry.
 211 */
 212static int __try_to_reclaim_swap(struct swap_info_struct *si,
 213                                 unsigned long offset, unsigned long flags)
 214{
 215        swp_entry_t entry = swp_entry(si->type, offset);
 216        struct address_space *address_space = swap_address_space(entry);
 217        struct swap_cluster_info *ci;
 218        struct folio *folio;
 219        int ret, nr_pages;
 220        bool need_reclaim;
 221
 222again:
 223        folio = filemap_get_folio(address_space, swap_cache_index(entry));
 224        if (IS_ERR(folio))
 225                return 0;
 226
 227        nr_pages = folio_nr_pages(folio);
 228        ret = -nr_pages;
 229
 230        /*
  231         * This function can be called during folio reclaim: vmscan may
  232         * already hold a folio lock at that point, so we have to use
  233         * trylock here to avoid deadlock. This is a special case; in
  234         * usual operations, use folio_free_swap() with an explicit
  235         * folio_lock() instead.
 236         */
 237        if (!folio_trylock(folio))
 238                goto out;
 239
 240        /*
  241         * The offset could point into the middle of a large folio, or the
  242         * folio may no longer contain the expected offset once it's locked.
 243         */
 244        entry = folio->swap;
 245        if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) {
 246                folio_unlock(folio);
 247                folio_put(folio);
 248                goto again;
 249        }
 250        offset = swp_offset(entry);
 251
 252        need_reclaim = ((flags & TTRS_ANYWAY) ||
 253                        ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
 254                        ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
 255        if (!need_reclaim || !folio_swapcache_freeable(folio))
 256                goto out_unlock;
 257
 258        /*
 259         * It's safe to delete the folio from swap cache only if the folio's
 260         * swap_map is HAS_CACHE only, which means the slots have no page table
 261         * reference or pending writeback, and can't be allocated to others.
 262         */
 263        ci = lock_cluster(si, offset);
 264        need_reclaim = swap_only_has_cache(si, offset, nr_pages);
 265        unlock_cluster(ci);
 266        if (!need_reclaim)
 267                goto out_unlock;
 268
 269        delete_from_swap_cache(folio);
 270        folio_set_dirty(folio);
 271        ret = nr_pages;
 272out_unlock:
 273        folio_unlock(folio);
 274out:
 275        folio_put(folio);
 276        return ret;
 277}
 278
 279static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 280{
 281        struct rb_node *rb = rb_first(&sis->swap_extent_root);
 282        return rb_entry(rb, struct swap_extent, rb_node);
 283}
 284
 285static inline struct swap_extent *next_se(struct swap_extent *se)
 286{
 287        struct rb_node *rb = rb_next(&se->rb_node);
 288        return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 289}
 290
 291/*
  292 * swapon tells the device that all the old swap contents can be discarded,
 293 * to allow the swap device to optimize its wear-levelling.
 294 */
 295static int discard_swap(struct swap_info_struct *si)
 296{
 297        struct swap_extent *se;
 298        sector_t start_block;
 299        sector_t nr_blocks;
 300        int err = 0;
 301
 302        /* Do not discard the swap header page! */
 303        se = first_se(si);
 304        start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 305        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 306        if (nr_blocks) {
 307                err = blkdev_issue_discard(si->bdev, start_block,
 308                                nr_blocks, GFP_KERNEL);
 309                if (err)
 310                        return err;
 311                cond_resched();
 312        }
 313
 314        for (se = next_se(se); se; se = next_se(se)) {
 315                start_block = se->start_block << (PAGE_SHIFT - 9);
 316                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 317
 318                err = blkdev_issue_discard(si->bdev, start_block,
 319                                nr_blocks, GFP_KERNEL);
 320                if (err)
 321                        break;
 322
 323                cond_resched();
 324        }
 325        return err;             /* That will often be -EOPNOTSUPP */
 326}
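/*
 * Illustrative note on the (PAGE_SHIFT - 9) shifts above (editorial
 * addition): they convert page numbers into 512-byte sector numbers.
 * Assuming 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	start_block = 16 << (PAGE_SHIFT - 9);	// page 16 -> sector 128
 *	nr_blocks   =  4 << (PAGE_SHIFT - 9);	// 4 pages -> 32 sectors
 */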
 327
 328static struct swap_extent *
 329offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 330{
 331        struct swap_extent *se;
 332        struct rb_node *rb;
 333
 334        rb = sis->swap_extent_root.rb_node;
 335        while (rb) {
 336                se = rb_entry(rb, struct swap_extent, rb_node);
 337                if (offset < se->start_page)
 338                        rb = rb->rb_left;
 339                else if (offset >= se->start_page + se->nr_pages)
 340                        rb = rb->rb_right;
 341                else
 342                        return se;
 343        }
 344        /* It *must* be present */
 345        BUG();
 346}
 347
 348sector_t swap_folio_sector(struct folio *folio)
 349{
 350        struct swap_info_struct *sis = swp_swap_info(folio->swap);
 351        struct swap_extent *se;
 352        sector_t sector;
 353        pgoff_t offset;
 354
 355        offset = swp_offset(folio->swap);
 356        se = offset_to_swap_extent(sis, offset);
 357        sector = se->start_block + (offset - se->start_page);
 358        return sector << (PAGE_SHIFT - 9);
 359}
 360
 361/*
  362 * swap allocation tells the device that a cluster of swap can now be discarded,
 363 * to allow the swap device to optimize its wear-levelling.
 364 */
 365static void discard_swap_cluster(struct swap_info_struct *si,
 366                                 pgoff_t start_page, pgoff_t nr_pages)
 367{
 368        struct swap_extent *se = offset_to_swap_extent(si, start_page);
 369
 370        while (nr_pages) {
 371                pgoff_t offset = start_page - se->start_page;
 372                sector_t start_block = se->start_block + offset;
 373                sector_t nr_blocks = se->nr_pages - offset;
 374
 375                if (nr_blocks > nr_pages)
 376                        nr_blocks = nr_pages;
 377                start_page += nr_blocks;
 378                nr_pages -= nr_blocks;
 379
 380                start_block <<= PAGE_SHIFT - 9;
 381                nr_blocks <<= PAGE_SHIFT - 9;
 382                if (blkdev_issue_discard(si->bdev, start_block,
 383                                        nr_blocks, GFP_NOIO))
 384                        break;
 385
 386                se = next_se(se);
 387        }
 388}
 389
 390#ifdef CONFIG_THP_SWAP
 391#define SWAPFILE_CLUSTER        HPAGE_PMD_NR
 392
 393#define swap_entry_order(order) (order)
 394#else
 395#define SWAPFILE_CLUSTER        256
 396
 397/*
  398 * Define swap_entry_order() as a constant to let the compiler optimize
 399 * out some code if !CONFIG_THP_SWAP
 400 */
 401#define swap_entry_order(order) 0
 402#endif
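/*
 * Illustrative note (editorial addition): with !CONFIG_THP_SWAP,
 * swap_entry_order() above is the constant 0, so for example:
 *
 *	unsigned int nr_pages = 1 << swap_entry_order(order);	// folds to 1
 *
 * which lets the compiler drop the large-folio code paths entirely.
 */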
 403#define LATENCY_LIMIT           256
 404
 405static inline bool cluster_is_empty(struct swap_cluster_info *info)
 406{
 407        return info->count == 0;
 408}
 409
 410static inline bool cluster_is_discard(struct swap_cluster_info *info)
 411{
 412        return info->flags == CLUSTER_FLAG_DISCARD;
 413}
 414
 415static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
 416{
 417        if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
 418                return false;
 419        if (!order)
 420                return true;
 421        return cluster_is_empty(ci) || order == ci->order;
 422}
 423
 424static inline unsigned int cluster_index(struct swap_info_struct *si,
 425                                         struct swap_cluster_info *ci)
 426{
 427        return ci - si->cluster_info;
 428}
 429
 430static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si,
 431                                                          unsigned long offset)
 432{
 433        return &si->cluster_info[offset / SWAPFILE_CLUSTER];
 434}
 435
 436static inline unsigned int cluster_offset(struct swap_info_struct *si,
 437                                          struct swap_cluster_info *ci)
 438{
 439        return cluster_index(si, ci) * SWAPFILE_CLUSTER;
 440}
 441
 442static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 443                                                     unsigned long offset)
 444{
 445        struct swap_cluster_info *ci;
 446
 447        ci = offset_to_cluster(si, offset);
 448        spin_lock(&ci->lock);
 449
 450        return ci;
 451}
 452
 453static inline void unlock_cluster(struct swap_cluster_info *ci)
 454{
 455        spin_unlock(&ci->lock);
 456}
 457
 458static void move_cluster(struct swap_info_struct *si,
 459                         struct swap_cluster_info *ci, struct list_head *list,
 460                         enum swap_cluster_flags new_flags)
 461{
 462        VM_WARN_ON(ci->flags == new_flags);
 463
 464        BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
 465        lockdep_assert_held(&ci->lock);
 466
 467        spin_lock(&si->lock);
 468        if (ci->flags == CLUSTER_FLAG_NONE)
 469                list_add_tail(&ci->list, list);
 470        else
 471                list_move_tail(&ci->list, list);
 472        spin_unlock(&si->lock);
 473
 474        if (ci->flags == CLUSTER_FLAG_FRAG)
 475                atomic_long_dec(&si->frag_cluster_nr[ci->order]);
 476        else if (new_flags == CLUSTER_FLAG_FRAG)
 477                atomic_long_inc(&si->frag_cluster_nr[ci->order]);
 478        ci->flags = new_flags;
 479}
 480
  481/* Add a cluster to the discard list and schedule the discard work */
 482static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 483                struct swap_cluster_info *ci)
 484{
 485        VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
 486        move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
 487        schedule_work(&si->discard_work);
 488}
 489
 490static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 491{
 492        lockdep_assert_held(&ci->lock);
 493        move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
 494        ci->order = 0;
 495}
 496
 497/*
  498 * Isolate and lock the first cluster on a list that is not contended,
  499 * clearing its flags before taking it off the list. Cluster flags must
  500 * stay in sync with the list status, so cluster updaters can always
  501 * know the cluster list status without touching the si lock.
  502 *
  503 * Note it's possible that all clusters on a list are contended, so
  504 * this may return NULL for a non-empty list.
 505 */
 506static struct swap_cluster_info *isolate_lock_cluster(
 507                struct swap_info_struct *si, struct list_head *list)
 508{
 509        struct swap_cluster_info *ci, *ret = NULL;
 510
 511        spin_lock(&si->lock);
 512
 513        if (unlikely(!(si->flags & SWP_WRITEOK)))
 514                goto out;
 515
 516        list_for_each_entry(ci, list, list) {
 517                if (!spin_trylock(&ci->lock))
 518                        continue;
 519
  520                /* We may only isolate and clear flags of the following lists */
 521                VM_BUG_ON(!ci->flags);
 522                VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
 523                          ci->flags != CLUSTER_FLAG_FULL);
 524
 525                list_del(&ci->list);
 526                ci->flags = CLUSTER_FLAG_NONE;
 527                ret = ci;
 528                break;
 529        }
 530out:
 531        spin_unlock(&si->lock);
 532
 533        return ret;
 534}
 535
 536/*
  537 * Actually do the discards. After a cluster discard is finished, the cluster
  538 * will be added to the free cluster list. Discard clusters are a bit special,
  539 * as they don't participate in allocation or reclaim, so clusters marked
  540 * CLUSTER_FLAG_DISCARD must remain off-list or on the discard list.
 541 */
 542static bool swap_do_scheduled_discard(struct swap_info_struct *si)
 543{
 544        struct swap_cluster_info *ci;
 545        bool ret = false;
 546        unsigned int idx;
 547
 548        spin_lock(&si->lock);
 549        while (!list_empty(&si->discard_clusters)) {
 550                ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
 551                /*
  552                 * Delete the cluster from the list to prepare for discard, but
  553                 * keep the CLUSTER_FLAG_DISCARD flag: percpu_swap_cluster could
  554                 * still be pointing to it, or it may be encountered by relocate_cluster.
 555                 */
 556                list_del(&ci->list);
 557                idx = cluster_index(si, ci);
 558                spin_unlock(&si->lock);
 559                discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 560                                SWAPFILE_CLUSTER);
 561
 562                spin_lock(&ci->lock);
 563                /*
 564                 * Discard is done, clear its flags as it's off-list, then
 565                 * return the cluster to allocation list.
 566                 */
 567                ci->flags = CLUSTER_FLAG_NONE;
 568                __free_cluster(si, ci);
 569                spin_unlock(&ci->lock);
 570                ret = true;
 571                spin_lock(&si->lock);
 572        }
 573        spin_unlock(&si->lock);
 574        return ret;
 575}
 576
 577static void swap_discard_work(struct work_struct *work)
 578{
 579        struct swap_info_struct *si;
 580
 581        si = container_of(work, struct swap_info_struct, discard_work);
 582
 583        swap_do_scheduled_discard(si);
 584}
 585
 586static void swap_users_ref_free(struct percpu_ref *ref)
 587{
 588        struct swap_info_struct *si;
 589
 590        si = container_of(ref, struct swap_info_struct, users);
 591        complete(&si->comp);
 592}
 593
 594/*
  595 * Must be called after freeing if ci->count == 0; moves the cluster to the
  596 * free or discard list.
 597 */
 598static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 599{
 600        VM_BUG_ON(ci->count != 0);
 601        VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
 602        lockdep_assert_held(&ci->lock);
 603
 604        /*
  605         * If the swap is discardable, schedule a discard for the cluster
  606         * instead of freeing it immediately. The cluster will be freed
 607         * after discard.
 608         */
 609        if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 610            (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 611                swap_cluster_schedule_discard(si, ci);
 612                return;
 613        }
 614
 615        __free_cluster(si, ci);
 616}
 617
 618/*
  619 * Must be called after freeing if ci->count != 0; moves the cluster to
  620 * the nonfull list.
 621 */
 622static void partial_free_cluster(struct swap_info_struct *si,
 623                                 struct swap_cluster_info *ci)
 624{
 625        VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
 626        lockdep_assert_held(&ci->lock);
 627
 628        if (ci->flags != CLUSTER_FLAG_NONFULL)
 629                move_cluster(si, ci, &si->nonfull_clusters[ci->order],
 630                             CLUSTER_FLAG_NONFULL);
 631}
 632
 633/*
  634 * Must be called after allocation; moves the cluster to the full or frag
  635 * list. Note: allocation doesn't acquire the si lock, and may drop the ci
  636 * lock for reclaim, so the cluster could be anywhere when called.
 637 */
 638static void relocate_cluster(struct swap_info_struct *si,
 639                             struct swap_cluster_info *ci)
 640{
 641        lockdep_assert_held(&ci->lock);
 642
 643        /* Discard cluster must remain off-list or on discard list */
 644        if (cluster_is_discard(ci))
 645                return;
 646
 647        if (!ci->count) {
 648                if (ci->flags != CLUSTER_FLAG_FREE)
 649                        free_cluster(si, ci);
 650        } else if (ci->count != SWAPFILE_CLUSTER) {
 651                if (ci->flags != CLUSTER_FLAG_FRAG)
 652                        move_cluster(si, ci, &si->frag_clusters[ci->order],
 653                                     CLUSTER_FLAG_FRAG);
 654        } else {
 655                if (ci->flags != CLUSTER_FLAG_FULL)
 656                        move_cluster(si, ci, &si->full_clusters,
 657                                     CLUSTER_FLAG_FULL);
 658        }
 659}
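/*
 * Editorial summary of the transitions implemented by relocate_cluster()
 * above (a sketch derived from the code, not an authoritative state
 * machine):
 *
 *	cluster_is_discard(ci)             -> left alone
 *	ci->count == 0                     -> free list (via the discard
 *	                                      list first if SWP_PAGE_DISCARD)
 *	0 < ci->count < SWAPFILE_CLUSTER   -> frag list of ci->order
 *	ci->count == SWAPFILE_CLUSTER      -> full list
 */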
 660
 661/*
 662 * The cluster corresponding to page_nr will be used. The cluster will not be
 663 * added to free cluster list and its usage counter will be increased by 1.
 664 * Only used for initialization.
 665 */
 666static void inc_cluster_info_page(struct swap_info_struct *si,
 667        struct swap_cluster_info *cluster_info, unsigned long page_nr)
 668{
 669        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 670        struct swap_cluster_info *ci;
 671
 672        ci = cluster_info + idx;
 673        ci->count++;
 674
 675        VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
 676        VM_BUG_ON(ci->flags);
 677}
 678
 679static bool cluster_reclaim_range(struct swap_info_struct *si,
 680                                  struct swap_cluster_info *ci,
 681                                  unsigned long start, unsigned long end)
 682{
 683        unsigned char *map = si->swap_map;
 684        unsigned long offset = start;
 685        int nr_reclaim;
 686
 687        spin_unlock(&ci->lock);
 688        do {
 689                switch (READ_ONCE(map[offset])) {
 690                case 0:
 691                        offset++;
 692                        break;
 693                case SWAP_HAS_CACHE:
 694                        nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 695                        if (nr_reclaim > 0)
 696                                offset += nr_reclaim;
 697                        else
 698                                goto out;
 699                        break;
 700                default:
 701                        goto out;
 702                }
 703        } while (offset < end);
 704out:
 705        spin_lock(&ci->lock);
 706        /*
  707         * Recheck the range whether reclaim succeeded or not: a slot
  708         * could have been freed while we were not holding the lock.
 709         */
 710        for (offset = start; offset < end; offset++)
 711                if (READ_ONCE(map[offset]))
 712                        return false;
 713
 714        return true;
 715}
 716
 717static bool cluster_scan_range(struct swap_info_struct *si,
 718                               struct swap_cluster_info *ci,
 719                               unsigned long start, unsigned int nr_pages,
 720                               bool *need_reclaim)
 721{
 722        unsigned long offset, end = start + nr_pages;
 723        unsigned char *map = si->swap_map;
 724
 725        if (cluster_is_empty(ci))
 726                return true;
 727
 728        for (offset = start; offset < end; offset++) {
 729                switch (READ_ONCE(map[offset])) {
 730                case 0:
 731                        continue;
 732                case SWAP_HAS_CACHE:
 733                        if (!vm_swap_full())
 734                                return false;
 735                        *need_reclaim = true;
 736                        continue;
 737                default:
 738                        return false;
 739                }
 740        }
 741
 742        return true;
 743}
 744
 745static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
 746                                unsigned int start, unsigned char usage,
 747                                unsigned int order)
 748{
 749        unsigned int nr_pages = 1 << order;
 750
 751        lockdep_assert_held(&ci->lock);
 752
 753        if (!(si->flags & SWP_WRITEOK))
 754                return false;
 755
 756        /*
 757         * The first allocation in a cluster makes the
 758         * cluster exclusive to this order
 759         */
 760        if (cluster_is_empty(ci))
 761                ci->order = order;
 762
 763        memset(si->swap_map + start, usage, nr_pages);
 764        swap_range_alloc(si, nr_pages);
 765        ci->count += nr_pages;
 766
 767        return true;
 768}
 769
  770/* Try to use a new cluster for the current CPU and allocate from it. */
 771static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
 772                                            struct swap_cluster_info *ci,
 773                                            unsigned long offset,
 774                                            unsigned int order,
 775                                            unsigned char usage)
 776{
 777        unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
 778        unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
 779        unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
 780        unsigned int nr_pages = 1 << order;
 781        bool need_reclaim, ret;
 782
 783        lockdep_assert_held(&ci->lock);
 784
 785        if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
 786                goto out;
 787
 788        for (end -= nr_pages; offset <= end; offset += nr_pages) {
 789                need_reclaim = false;
 790                if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
 791                        continue;
 792                if (need_reclaim) {
 793                        ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
 794                        /*
 795                         * Reclaim drops ci->lock and cluster could be used
 796                         * by another order. Not checking flag as off-list
 797                         * cluster has no flag set, and change of list
 798                         * won't cause fragmentation.
 799                         */
 800                        if (!cluster_is_usable(ci, order))
 801                                goto out;
 802                        if (cluster_is_empty(ci))
 803                                offset = start;
 804                        /* Reclaim failed but cluster is usable, try next */
 805                        if (!ret)
 806                                continue;
 807                }
 808                if (!cluster_alloc_range(si, ci, offset, usage, order))
 809                        break;
 810                found = offset;
 811                offset += nr_pages;
 812                if (ci->count < SWAPFILE_CLUSTER && offset <= end)
 813                        next = offset;
 814                break;
 815        }
 816out:
 817        relocate_cluster(si, ci);
 818        unlock_cluster(ci);
 819        if (si->flags & SWP_SOLIDSTATE) {
 820                this_cpu_write(percpu_swap_cluster.offset[order], next);
 821                this_cpu_write(percpu_swap_cluster.si[order], si);
 822        } else {
 823                si->global_cluster->next[order] = next;
 824        }
 825        return found;
 826}
 827
 828static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
 829{
 830        long to_scan = 1;
 831        unsigned long offset, end;
 832        struct swap_cluster_info *ci;
 833        unsigned char *map = si->swap_map;
 834        int nr_reclaim;
 835
 836        if (force)
 837                to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;
 838
 839        while ((ci = isolate_lock_cluster(si, &si->full_clusters))) {
 840                offset = cluster_offset(si, ci);
 841                end = min(si->max, offset + SWAPFILE_CLUSTER);
 842                to_scan--;
 843
 844                while (offset < end) {
 845                        if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
 846                                spin_unlock(&ci->lock);
 847                                nr_reclaim = __try_to_reclaim_swap(si, offset,
 848                                                                   TTRS_ANYWAY);
 849                                spin_lock(&ci->lock);
 850                                if (nr_reclaim) {
 851                                        offset += abs(nr_reclaim);
 852                                        continue;
 853                                }
 854                        }
 855                        offset++;
 856                }
 857
 858                /* in case no swap cache is reclaimed */
 859                if (ci->flags == CLUSTER_FLAG_NONE)
 860                        relocate_cluster(si, ci);
 861
 862                unlock_cluster(ci);
 863                if (to_scan <= 0)
 864                        break;
 865        }
 866}
 867
 868static void swap_reclaim_work(struct work_struct *work)
 869{
 870        struct swap_info_struct *si;
 871
 872        si = container_of(work, struct swap_info_struct, reclaim_work);
 873
 874        swap_reclaim_full_clusters(si, true);
 875}
 876
 877/*
  878 * Try to allocate swap entries with the specified order, and try to set
  879 * a new cluster for the current CPU as well.
 880 */
 881static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
 882                                              unsigned char usage)
 883{
 884        struct swap_cluster_info *ci;
 885        unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
 886
 887        /*
  888         * The swapfile is not a block device, so it is unable
  889         * to allocate large entries.
 890         */
 891        if (order && !(si->flags & SWP_BLKDEV))
 892                return 0;
 893
 894        if (!(si->flags & SWP_SOLIDSTATE)) {
 895                /* Serialize HDD SWAP allocation for each device. */
 896                spin_lock(&si->global_cluster_lock);
 897                offset = si->global_cluster->next[order];
 898                if (offset == SWAP_ENTRY_INVALID)
 899                        goto new_cluster;
 900
 901                ci = lock_cluster(si, offset);
 902                /* Cluster could have been used by another order */
 903                if (cluster_is_usable(ci, order)) {
 904                        if (cluster_is_empty(ci))
 905                                offset = cluster_offset(si, ci);
 906                        found = alloc_swap_scan_cluster(si, ci, offset,
 907                                                        order, usage);
 908                } else {
 909                        unlock_cluster(ci);
 910                }
 911                if (found)
 912                        goto done;
 913        }
 914
 915new_cluster:
 916        ci = isolate_lock_cluster(si, &si->free_clusters);
 917        if (ci) {
 918                found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 919                                                order, usage);
 920                if (found)
 921                        goto done;
 922        }
 923
  924        /* Try to reclaim from full clusters if the free cluster list is drained */
 925        if (vm_swap_full())
 926                swap_reclaim_full_clusters(si, false);
 927
 928        if (order < PMD_ORDER) {
 929                unsigned int frags = 0, frags_existing;
 930
 931                while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
 932                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 933                                                        order, usage);
 934                        if (found)
 935                                goto done;
  936                        /* Clusters that failed to allocate are moved to frag_clusters */
 937                        frags++;
 938                }
 939
 940                frags_existing = atomic_long_read(&si->frag_cluster_nr[order]);
 941                while (frags < frags_existing &&
 942                       (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) {
 943                        atomic_long_dec(&si->frag_cluster_nr[order]);
 944                        /*
  945                         * Rotate the frag list to iterate: these clusters all
  946                         * failed high-order allocation or were moved here due to
  947                         * per-CPU usage, but they could still contain newly
  948                         * released, reclaimable (e.g. lazy-freed swap cache) slots.
 949                         */
 950                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 951                                                        order, usage);
 952                        if (found)
 953                                goto done;
 954                        frags++;
 955                }
 956        }
 957
 958        /*
 959         * We don't have free cluster but have some clusters in discarding,
 960         * do discard now and reclaim them.
 961         */
 962        if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
 963                goto new_cluster;
 964
 965        if (order)
 966                goto done;
 967
 968        /* Order 0 stealing from higher order */
 969        for (int o = 1; o < SWAP_NR_ORDERS; o++) {
 970                /*
  971                 * Clusters here have at least one usable slot and can't fail order 0
 972                 * allocation, but reclaim may drop si->lock and race with another user.
 973                 */
 974                while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
 975                        atomic_long_dec(&si->frag_cluster_nr[o]);
 976                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 977                                                        0, usage);
 978                        if (found)
 979                                goto done;
 980                }
 981
 982                while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
 983                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 984                                                        0, usage);
 985                        if (found)
 986                                goto done;
 987                }
 988        }
 989done:
 990        if (!(si->flags & SWP_SOLIDSTATE))
 991                spin_unlock(&si->global_cluster_lock);
 992        return found;
 993}
 994
 995/* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
 996static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
 997{
 998        int nid;
 999        unsigned long pages;
1000
1001        spin_lock(&swap_avail_lock);
1002
1003        if (swapoff) {
1004                /*
1005                 * Forcefully remove it. Clear the SWP_WRITEOK flags for
1006                 * swapoff here so it's synchronized by both si->lock and
1007                 * swap_avail_lock, to ensure the result can be seen by
1008                 * add_to_avail_list.
1009                 */
1010                lockdep_assert_held(&si->lock);
1011                si->flags &= ~SWP_WRITEOK;
1012                atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1013        } else {
1014                /*
 1015                 * If not called by swapoff, take it off the list only if it's
 1016                 * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly,
 1017                 * si->inuse_pages == pages); any concurrent slot freeing, or
 1018                 * the device having already been removed from the plist by
 1019                 * someone else, will make the cmpxchg below fail.
1020                 */
1021                pages = si->pages;
1022                if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1023                                             pages | SWAP_USAGE_OFFLIST_BIT))
1024                        goto skip;
1025        }
1026
1027        for_each_node(nid)
1028                plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
1029
1030skip:
1031        spin_unlock(&swap_avail_lock);
1032}
1033
1034/* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
1035static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
1036{
1037        int nid;
1038        long val;
1039        unsigned long pages;
1040
1041        spin_lock(&swap_avail_lock);
1042
1043        /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
1044        if (swapon) {
1045                lockdep_assert_held(&si->lock);
1046                si->flags |= SWP_WRITEOK;
1047        } else {
1048                if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
1049                        goto skip;
1050        }
1051
1052        if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
1053                goto skip;
1054
1055        val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1056
1057        /*
 1058         * When the device is full and on the plist, only one updater will
 1059         * see (inuse_pages == si->pages) and call del_from_avail_list. If
 1060         * that updater happens to be here, just skip adding.
1061         */
1062        pages = si->pages;
1063        if (val == pages) {
1064                /* Just like the cmpxchg in del_from_avail_list */
1065                if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1066                                            pages | SWAP_USAGE_OFFLIST_BIT))
1067                        goto skip;
1068        }
1069
1070        for_each_node(nid)
1071                plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
1072
1073skip:
1074        spin_unlock(&swap_avail_lock);
1075}
1076
1077/*
1078 * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
1079 * within each cluster, so the total contribution to the global counter should
1080 * always be positive and cannot exceed the total number of usable slots.
1081 */
1082static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
1083{
1084        long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);
1085
1086        /*
1087         * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set,
1088         * remove it from the plist.
1089         */
1090        if (unlikely(val == si->pages)) {
1091                del_from_avail_list(si, false);
1092                return true;
1093        }
1094
1095        return false;
1096}
1097
1098static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
1099{
1100        long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);
1101
1102        /*
1103         * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
1104         * add it to the plist.
1105         */
1106        if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
1107                add_to_avail_list(si, false);
1108}
1109
1110static void swap_range_alloc(struct swap_info_struct *si,
1111                             unsigned int nr_entries)
1112{
1113        if (swap_usage_add(si, nr_entries)) {
1114                if (vm_swap_full())
1115                        schedule_work(&si->reclaim_work);
1116        }
1117        atomic_long_sub(nr_entries, &nr_swap_pages);
1118}
1119
1120static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
1121                            unsigned int nr_entries)
1122{
1123        unsigned long begin = offset;
1124        unsigned long end = offset + nr_entries - 1;
1125        void (*swap_slot_free_notify)(struct block_device *, unsigned long);
1126        unsigned int i;
1127
1128        /*
 1129         * Use atomic clear_bit operations only on the zeromap, instead of non-atomic
 1130         * bitmap_clear, to prevent corruption of adjacent bits from simultaneous writes.
1131         */
1132        for (i = 0; i < nr_entries; i++) {
1133                clear_bit(offset + i, si->zeromap);
1134                zswap_invalidate(swp_entry(si->type, offset + i));
1135        }
1136
1137        if (si->flags & SWP_BLKDEV)
1138                swap_slot_free_notify =
1139                        si->bdev->bd_disk->fops->swap_slot_free_notify;
1140        else
1141                swap_slot_free_notify = NULL;
1142        while (offset <= end) {
1143                arch_swap_invalidate_page(si->type, offset);
1144                if (swap_slot_free_notify)
1145                        swap_slot_free_notify(si->bdev, offset);
1146                offset++;
1147        }
1148        clear_shadow_from_swap_cache(si->type, begin, end);
1149
1150        /*
1151         * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
1152         * only after the above cleanups are done.
1153         */
1154        smp_wmb();
1155        atomic_long_add(nr_entries, &nr_swap_pages);
1156        swap_usage_sub(si, nr_entries);
1157}
1158
1159static bool get_swap_device_info(struct swap_info_struct *si)
1160{
1161        if (!percpu_ref_tryget_live(&si->users))
1162                return false;
1163        /*
 1164         * Guarantee that si->users is checked before accessing other
 1165         * fields of swap_info_struct, and that si->flags (SWP_WRITEOK)
 1166         * is up to date.
1167         *
1168         * Paired with the spin_unlock() after setup_swap_info() in
1169         * enable_swap_info(), and smp_wmb() in swapoff.
1170         */
1171        smp_rmb();
1172        return true;
1173}
1174
1175/*
 1176 * Fast path: try to get swap entries with the specified order from the
 1177 * current CPU's swap entry pool (a cluster).
1178 */
1179static bool swap_alloc_fast(swp_entry_t *entry,
1180                            int order)
1181{
1182        struct swap_cluster_info *ci;
1183        struct swap_info_struct *si;
1184        unsigned int offset, found = SWAP_ENTRY_INVALID;
1185
1186        /*
 1187         * Once allocated, a swap_info_struct will never be completely freed,
 1188         * so checking its liveness with get_swap_device_info is enough.
1189         */
1190        si = this_cpu_read(percpu_swap_cluster.si[order]);
1191        offset = this_cpu_read(percpu_swap_cluster.offset[order]);
1192        if (!si || !offset || !get_swap_device_info(si))
1193                return false;
1194
1195        ci = lock_cluster(si, offset);
1196        if (cluster_is_usable(ci, order)) {
1197                if (cluster_is_empty(ci))
1198                        offset = cluster_offset(si, ci);
1199                found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE);
1200                if (found)
1201                        *entry = swp_entry(si->type, found);
1202        } else {
1203                unlock_cluster(ci);
1204        }
1205
1206        put_swap_device(si);
1207        return !!found;
1208}
1209
1210/* Rotate the device and switch to a new cluster */
1211static bool swap_alloc_slow(swp_entry_t *entry,
1212                            int order)
1213{
1214        int node;
1215        unsigned long offset;
1216        struct swap_info_struct *si, *next;
1217
1218        node = numa_node_id();
1219        spin_lock(&swap_avail_lock);
1220start_over:
1221        plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1222                /* Rotate the device and switch to a new cluster */
1223                plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1224                spin_unlock(&swap_avail_lock);
1225                if (get_swap_device_info(si)) {
1226                        offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
1227                        put_swap_device(si);
1228                        if (offset) {
1229                                *entry = swp_entry(si->type, offset);
1230                                return true;
1231                        }
1232                        if (order)
1233                                return false;
1234                }
1235
1236                spin_lock(&swap_avail_lock);
1237                /*
1238                 * if we got here, it's likely that si was almost full before,
1239                 * and since scan_swap_map_slots() can drop the si->lock,
1240                 * multiple callers probably all tried to get a page from the
1241                 * same si and it filled up before we could get one; or, the si
1242                 * filled up between us dropping swap_avail_lock and taking
1243                 * si->lock. Since we dropped the swap_avail_lock, the
1244                 * swap_avail_head list may have been modified; so if next is
1245                 * still in the swap_avail_head list then try it, otherwise
1246                 * start over if we have not gotten any slots.
1247                 */
1248                if (plist_node_empty(&next->avail_lists[node]))
1249                        goto start_over;
1250        }
1251        spin_unlock(&swap_avail_lock);
1252        return false;
1253}
1254
1255/**
1256 * folio_alloc_swap - allocate swap space for a folio
1257 * @folio: folio we want to move to swap
1258 * @gfp: gfp mask for shadow nodes
1259 *
1260 * Allocate swap space for the folio and add the folio to the
1261 * swap cache.
1262 *
1263 * Context: Caller needs to hold the folio lock.
1264 * Return: Whether the folio was added to the swap cache.
1265 */
1266int folio_alloc_swap(struct folio *folio, gfp_t gfp)
1267{
1268        unsigned int order = folio_order(folio);
1269        unsigned int size = 1 << order;
1270        swp_entry_t entry = {};
1271
1272        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1273        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
1274
1275        if (order) {
1276                /*
 1277                 * Reject large allocations when THP_SWAP is disabled;
 1278                 * the caller should split the folio and try again.
1279                 */
1280                if (!IS_ENABLED(CONFIG_THP_SWAP))
1281                        return -EAGAIN;
1282
1283                /*
1284                 * Allocation size should never exceed cluster size
1285                 * (HPAGE_PMD_SIZE).
1286                 */
1287                if (size > SWAPFILE_CLUSTER) {
1288                        VM_WARN_ON_ONCE(1);
1289                        return -EINVAL;
1290                }
1291        }
1292
1293        local_lock(&percpu_swap_cluster.lock);
1294        if (!swap_alloc_fast(&entry, order))
1295                swap_alloc_slow(&entry, order);
1296        local_unlock(&percpu_swap_cluster.lock);
1297
1298        /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
1299        if (mem_cgroup_try_charge_swap(folio, entry))
1300                goto out_free;
1301
1302        if (!entry.val)
1303                return -ENOMEM;
1304
1305        /*
1306         * XArray node allocations from PF_MEMALLOC contexts could
1307         * completely exhaust the page allocator. __GFP_NOMEMALLOC
1308         * stops emergency reserves from being allocated.
1309         *
1310         * TODO: this could cause a theoretical memory reclaim
1311         * deadlock in the swap out path.
1312         */
1313        if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
1314                goto out_free;
1315
1316        return 0;
1317
1318out_free:
1319        put_swap_folio(folio, entry);
1320        return -ENOMEM;
1321}
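/*
 * Illustrative usage sketch for folio_alloc_swap() (hypothetical caller,
 * editorial addition; the real callers live in the reclaim and shmem
 * writeout paths):
 *
 *	folio_lock(folio);
 *	if (!folio_alloc_swap(folio, GFP_KERNEL)) {
 *		// folio->swap is now valid and the folio sits in the
 *		// swap cache; it may be written out to the swap device.
 *	}
 *	folio_unlock(folio);
 */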
1322
1323static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1324{
1325        struct swap_info_struct *si;
1326        unsigned long offset;
1327
1328        if (!entry.val)
1329                goto out;
1330        si = swp_swap_info(entry);
1331        if (!si)
1332                goto bad_nofile;
1333        if (data_race(!(si->flags & SWP_USED)))
1334                goto bad_device;
1335        offset = swp_offset(entry);
1336        if (offset >= si->max)
1337                goto bad_offset;
1338        if (data_race(!si->swap_map[swp_offset(entry)]))
1339                goto bad_free;
1340        return si;
1341
1342bad_free:
1343        pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1344        goto out;
1345bad_offset:
1346        pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1347        goto out;
1348bad_device:
1349        pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1350        goto out;
1351bad_nofile:
1352        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1353out:
1354        return NULL;
1355}
1356
1357static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
1358                                           struct swap_cluster_info *ci,
1359                                           swp_entry_t entry,
1360                                           unsigned char usage)
1361{
1362        unsigned long offset = swp_offset(entry);
1363        unsigned char count;
1364        unsigned char has_cache;
1365
1366        count = si->swap_map[offset];
1367
1368        has_cache = count & SWAP_HAS_CACHE;
1369        count &= ~SWAP_HAS_CACHE;
1370
1371        if (usage == SWAP_HAS_CACHE) {
1372                VM_BUG_ON(!has_cache);
1373                has_cache = 0;
1374        } else if (count == SWAP_MAP_SHMEM) {
1375                /*
1376                 * Or we could insist on shmem.c using a special
1377                 * swap_shmem_free() and free_shmem_swap_and_cache()...
1378                 */
1379                count = 0;
1380        } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1381                if (count == COUNT_CONTINUED) {
1382                        if (swap_count_continued(si, offset, count))
1383                                count = SWAP_MAP_MAX | COUNT_CONTINUED;
1384                        else
1385                                count = SWAP_MAP_MAX;
1386                } else
1387                        count--;
1388        }
1389
1390        usage = count | has_cache;
1391        if (usage)
1392                WRITE_ONCE(si->swap_map[offset], usage);
1393        else
1394                swap_entries_free(si, ci, entry, 1);
1395
1396        return usage;
1397}
1398
1399/*
 1400 * When we get a swap entry, if there isn't some other way to
 1401 * prevent swapoff, such as the folio in the swap cache being locked,
 1402 * or the RCU reader side being locked, etc., the swap entry may become
 1403 * invalid because of swapoff.  Then, we need to enclose all swap
 1404 * related functions with get_swap_device() and put_swap_device(),
 1405 * unless the swap functions call get/put_swap_device() by themselves.
1406 *
1407 * RCU reader side lock (including any spinlock) is sufficient to
1408 * prevent swapoff, because synchronize_rcu() is called in swapoff()
1409 * before freeing data structures.
1410 *
1411 * Check whether swap entry is valid in the swap device.  If so,
1412 * return pointer to swap_info_struct, and keep the swap entry valid
1413 * via preventing the swap device from being swapoff, until
1414 * put_swap_device() is called.  Otherwise return NULL.
1415 *
1416 * Notice that swapoff or swapoff+swapon can still happen before the
1417 * percpu_ref_tryget_live() in get_swap_device() or after the
1418 * percpu_ref_put() in put_swap_device() if there isn't any other way
1419 * to prevent swapoff.  The caller must be prepared for that.  For
1420 * example, the following situation is possible.
1421 *
1422 *   CPU1                               CPU2
1423 *   do_swap_page()
1424 *     ...                              swapoff+swapon
1425 *     __read_swap_cache_async()
1426 *       swapcache_prepare()
1427 *         __swap_duplicate()
1428 *           // check swap_map
1429 *     // verify PTE not changed
1430 *
 1431 * In __swap_duplicate(), the swap_map needs to be checked before being
 1432 * changed, partly because the specified swap entry may belong to another
 1433 * swap device which has been swapped off.  And in do_swap_page(), after
 1434 * the page is read from the swap device, the PTE is verified to be
 1435 * unchanged, with the page table locked, to check whether the swap
 1436 * device has been swapped off or swapped off and on again.
1437 */
1438struct swap_info_struct *get_swap_device(swp_entry_t entry)
1439{
1440        struct swap_info_struct *si;
1441        unsigned long offset;
1442
1443        if (!entry.val)
1444                goto out;
1445        si = swp_swap_info(entry);
1446        if (!si)
1447                goto bad_nofile;
1448        if (!get_swap_device_info(si))
1449                goto out;
1450        offset = swp_offset(entry);
1451        if (offset >= si->max)
1452                goto put_out;
1453
1454        return si;
1455bad_nofile:
1456        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1457out:
1458        return NULL;
1459put_out:
1460        pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1461        percpu_ref_put(&si->users);
1462        return NULL;
1463}
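
/*
 * A minimal usage sketch (an editorial illustration, not a caller in
 * this file): stabilize the device before touching per-device data
 * such as si->swap_map, which swapoff would otherwise be free to tear
 * down:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	unsigned char count;
 *
 *	if (!si)
 *		return;
 *	count = READ_ONCE(si->swap_map[swp_offset(entry)]);
 *	... use count; the device cannot be swapped off here ...
 *	put_swap_device(si);
 */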
1464
1465static void swap_entries_put_cache(struct swap_info_struct *si,
1466                                   swp_entry_t entry, int nr)
1467{
1468        unsigned long offset = swp_offset(entry);
1469        struct swap_cluster_info *ci;
1470
1471        ci = lock_cluster(si, offset);
1472        if (swap_only_has_cache(si, offset, nr))
1473                swap_entries_free(si, ci, entry, nr);
1474        else {
1475                for (int i = 0; i < nr; i++, entry.val++)
1476                        swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
1477        }
1478        unlock_cluster(ci);
1479}
1480
1481static bool swap_entries_put_map(struct swap_info_struct *si,
1482                                 swp_entry_t entry, int nr)
1483{
1484        unsigned long offset = swp_offset(entry);
1485        struct swap_cluster_info *ci;
1486        bool has_cache = false;
1487        unsigned char count;
1488        int i;
1489
1490        if (nr <= 1)
1491                goto fallback;
1492        count = swap_count(data_race(si->swap_map[offset]));
1493        if (count != 1 && count != SWAP_MAP_SHMEM)
1494                goto fallback;
1495
1496        ci = lock_cluster(si, offset);
1497        if (!swap_is_last_map(si, offset, nr, &has_cache)) {
1498                goto locked_fallback;
1499        }
1500        if (!has_cache)
1501                swap_entries_free(si, ci, entry, nr);
1502        else
1503                for (i = 0; i < nr; i++)
1504                        WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
1505        unlock_cluster(ci);
1506
1507        return has_cache;
1508
1509fallback:
1510        ci = lock_cluster(si, offset);
1511locked_fallback:
1512        for (i = 0; i < nr; i++, entry.val++) {
1513                count = swap_entry_put_locked(si, ci, entry, 1);
1514                if (count == SWAP_HAS_CACHE)
1515                        has_cache = true;
1516        }
1517        unlock_cluster(ci);
1518        return has_cache;
1519
1520}
1521
1522/*
1523 * Only functions with an "_nr" suffix are able to free entries spanning
1524 * multiple clusters, so ensure the range is within a single cluster
1525 * when freeing entries with functions without the "_nr" suffix.
1526 */
1527static bool swap_entries_put_map_nr(struct swap_info_struct *si,
1528                                    swp_entry_t entry, int nr)
1529{
1530        int cluster_nr, cluster_rest;
1531        unsigned long offset = swp_offset(entry);
1532        bool has_cache = false;
1533
1534        cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER;
1535        while (nr) {
1536                cluster_nr = min(nr, cluster_rest);
1537                has_cache |= swap_entries_put_map(si, entry, cluster_nr);
1538                cluster_rest = SWAPFILE_CLUSTER;
1539                nr -= cluster_nr;
1540                entry.val += cluster_nr;
1541        }
1542
1543        return has_cache;
1544}
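
/*
 * For illustration (editorial): if SWAPFILE_CLUSTER were 512, a call
 * covering offsets [1000, 1600) would be split into chunks of 24 (up
 * to the cluster boundary at 1024), then 512 (one whole cluster), then
 * 64, so each swap_entries_put_map() call stays within one cluster.
 */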
1545
1546/*
1547 * Check if it's the last ref of a swap entry in the freeing path.
1548 * Qualifying values are 1, SWAP_HAS_CACHE and SWAP_MAP_SHMEM.
1549 */
1550static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
1551{
1552        return (count == SWAP_HAS_CACHE) || (count == 1) ||
1553               (count == SWAP_MAP_SHMEM);
1554}
1555
1556/*
1557 * Drop the last ref of swap entries; the caller must ensure all entries
1558 * belong to the same cgroup and cluster.
1559 */
1560static void swap_entries_free(struct swap_info_struct *si,
1561                              struct swap_cluster_info *ci,
1562                              swp_entry_t entry, unsigned int nr_pages)
1563{
1564        unsigned long offset = swp_offset(entry);
1565        unsigned char *map = si->swap_map + offset;
1566        unsigned char *map_end = map + nr_pages;
1567
1568        /* It should never free entries across different clusters */
1569        VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
1570        VM_BUG_ON(cluster_is_empty(ci));
1571        VM_BUG_ON(ci->count < nr_pages);
1572
1573        ci->count -= nr_pages;
1574        do {
1575                VM_BUG_ON(!swap_is_last_ref(*map));
1576                *map = 0;
1577        } while (++map < map_end);
1578
1579        mem_cgroup_uncharge_swap(entry, nr_pages);
1580        swap_range_free(si, offset, nr_pages);
1581
1582        if (!ci->count)
1583                free_cluster(si, ci);
1584        else
1585                partial_free_cluster(si, ci);
1586}
1587
1588/*
1589 * Caller has made sure that the swap device corresponding to entry
1590 * is still around or has not been recycled.
1591 */
1592void swap_free_nr(swp_entry_t entry, int nr_pages)
1593{
1594        int nr;
1595        struct swap_info_struct *sis;
1596        unsigned long offset = swp_offset(entry);
1597
1598        sis = _swap_info_get(entry);
1599        if (!sis)
1600                return;
1601
1602        while (nr_pages) {
1603                nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
1604                swap_entries_put_map(sis, swp_entry(sis->type, offset), nr);
1605                offset += nr;
1606                nr_pages -= nr;
1607        }
1608}
1609
1610/*
1611 * Called after dropping the swapcache to decrease the refcount of swap entries.
1612 */
1613void put_swap_folio(struct folio *folio, swp_entry_t entry)
1614{
1615        struct swap_info_struct *si;
1616        int size = 1 << swap_entry_order(folio_order(folio));
1617
1618        si = _swap_info_get(entry);
1619        if (!si)
1620                return;
1621
1622        swap_entries_put_cache(si, entry, size);
1623}
1624
1625int __swap_count(swp_entry_t entry)
1626{
1627        struct swap_info_struct *si = swp_swap_info(entry);
1628        pgoff_t offset = swp_offset(entry);
1629
1630        return swap_count(si->swap_map[offset]);
1631}
1632
1633/*
1634 * Check whether any map references to @entry remain, i.e. whether it
1635 * is still swapped out (the swap cache reference is not counted).
1636 * Only zero vs. non-zero matters, so COUNT_CONTINUED needs no handling.
1637 */
1638bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
1639{
1640        pgoff_t offset = swp_offset(entry);
1641        struct swap_cluster_info *ci;
1642        int count;
1643
1644        ci = lock_cluster(si, offset);
1645        count = swap_count(si->swap_map[offset]);
1646        unlock_cluster(ci);
1647        return !!count;
1648}
1649
1650/*
1651 * How many references to @entry are currently swapped out?
1652 * This considers COUNT_CONTINUED, so it returns an exact answer.
1653 */
1654int swp_swapcount(swp_entry_t entry)
1655{
1656        int count, tmp_count, n;
1657        struct swap_info_struct *si;
1658        struct swap_cluster_info *ci;
1659        struct page *page;
1660        pgoff_t offset;
1661        unsigned char *map;
1662
1663        si = _swap_info_get(entry);
1664        if (!si)
1665                return 0;
1666
1667        offset = swp_offset(entry);
1668
1669        ci = lock_cluster(si, offset);
1670
1671        count = swap_count(si->swap_map[offset]);
1672        if (!(count & COUNT_CONTINUED))
1673                goto out;
1674
1675        count &= ~COUNT_CONTINUED;
1676        n = SWAP_MAP_MAX + 1;
1677
1678        page = vmalloc_to_page(si->swap_map + offset);
1679        offset &= ~PAGE_MASK;
1680        VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1681
1682        do {
1683                page = list_next_entry(page, lru);
1684                map = kmap_local_page(page);
1685                tmp_count = map[offset];
1686                kunmap_local(map);
1687
1688                count += (tmp_count & ~COUNT_CONTINUED) * n;
1689                n *= (SWAP_CONT_MAX + 1);
1690        } while (tmp_count & COUNT_CONTINUED);
1691out:
1692        unlock_cluster(ci);
1693        return count;
1694}
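
/*
 * Worked example of the continuation arithmetic above (editorial):
 * with SWAP_MAP_MAX == 0x3e and SWAP_CONT_MAX == 0x7f, a primary map
 * byte of SWAP_MAP_MAX | COUNT_CONTINUED contributes 62, and each
 * continuation byte is a digit of weight n, starting at 0x3f (63) and
 * multiplied by 0x80 per level.  A single continuation byte of 5
 * therefore yields 62 + 5 * 63 = 377 references in total.
 */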
1695
1696static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1697                                         swp_entry_t entry, int order)
1698{
1699        struct swap_cluster_info *ci;
1700        unsigned char *map = si->swap_map;
1701        unsigned int nr_pages = 1 << order;
1702        unsigned long roffset = swp_offset(entry);
1703        unsigned long offset = round_down(roffset, nr_pages);
1704        int i;
1705        bool ret = false;
1706
1707        ci = lock_cluster(si, offset);
1708        if (nr_pages == 1) {
1709                if (swap_count(map[roffset]))
1710                        ret = true;
1711                goto unlock_out;
1712        }
1713        for (i = 0; i < nr_pages; i++) {
1714                if (swap_count(map[offset + i])) {
1715                        ret = true;
1716                        break;
1717                }
1718        }
1719unlock_out:
1720        unlock_cluster(ci);
1721        return ret;
1722}
1723
1724static bool folio_swapped(struct folio *folio)
1725{
1726        swp_entry_t entry = folio->swap;
1727        struct swap_info_struct *si = _swap_info_get(entry);
1728
1729        if (!si)
1730                return false;
1731
1732        if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1733                return swap_entry_swapped(si, entry);
1734
1735        return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
1736}
1737
1738static bool folio_swapcache_freeable(struct folio *folio)
1739{
1740        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1741
1742        if (!folio_test_swapcache(folio))
1743                return false;
1744        if (folio_test_writeback(folio))
1745                return false;
1746
1747        /*
1748         * Once hibernation has begun to create its image of memory,
1749         * there's a danger that one of the calls to folio_free_swap()
1750         * - most probably a call from __try_to_reclaim_swap() while
1751         * hibernation is allocating its own swap pages for the image,
1752         * but conceivably even a call from memory reclaim - will free
1753         * the swap from a folio which has already been recorded in the
1754         * image as a clean swapcache folio, and then reuse its swap for
1755         * another page of the image.  On waking from hibernation, the
1756         * original folio might be freed under memory pressure, then
1757         * later read back in from swap, now with the wrong data.
1758         *
1759         * Hibernation suspends storage while it is writing the image
1760         * to disk so check that here.
1761         */
1762        if (pm_suspended_storage())
1763                return false;
1764
1765        return true;
1766}
1767
1768/**
1769 * folio_free_swap() - Free the swap space used for this folio.
1770 * @folio: The folio to remove.
1771 *
1772 * If swap is getting full, or if there are no more mappings of this folio,
1773 * then call folio_free_swap to free its swap space.
1774 *
1775 * Return: true if we were able to release the swap space.
1776 */
1777bool folio_free_swap(struct folio *folio)
1778{
1779        if (!folio_swapcache_freeable(folio))
1780                return false;
1781        if (folio_swapped(folio))
1782                return false;
1783
1784        delete_from_swap_cache(folio);
1785        folio_set_dirty(folio);
1786        return true;
1787}
1788
1789/**
1790 * free_swap_and_cache_nr() - Release reference on range of swap entries and
1791 *                            reclaim their cache if no more references remain.
1792 * @entry: First entry of range.
1793 * @nr: Number of entries in range.
1794 *
1795 * For each swap entry in the contiguous range, release a reference. If any swap
1796 * entries become free, try to reclaim their underlying folios, if present. The
1797 * offset range is defined by [entry.offset, entry.offset + nr).
1798 */
1799void free_swap_and_cache_nr(swp_entry_t entry, int nr)
1800{
1801        const unsigned long start_offset = swp_offset(entry);
1802        const unsigned long end_offset = start_offset + nr;
1803        struct swap_info_struct *si;
1804        bool any_only_cache = false;
1805        unsigned long offset;
1806
1807        si = get_swap_device(entry);
1808        if (!si)
1809                return;
1810
1811        if (WARN_ON(end_offset > si->max))
1812                goto out;
1813
1814        /*
1815         * First free all entries in the range.
1816         */
1817        any_only_cache = swap_entries_put_map_nr(si, entry, nr);
1818
1819        /*
1820         * Short-circuit the loop below if none of the entries had their
1821         * reference dropped to zero.
1822         */
1823        if (!any_only_cache)
1824                goto out;
1825
1826        /*
1827         * Now go back over the range trying to reclaim the swap cache.
1828         */
1829        for (offset = start_offset; offset < end_offset; offset += nr) {
1830                nr = 1;
1831                if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1832                        /*
1833                         * Folios are always naturally aligned in swap so
1834                         * advance forward to the next boundary. Zero means no
1835                         * folio was found for the swap entry, so advance by 1
1836                         * in this case. Negative value means folio was found
1837                         * but could not be reclaimed. Here we can still advance
1838                         * to the next boundary.
1839                         */
1840                        nr = __try_to_reclaim_swap(si, offset,
1841                                                   TTRS_UNMAPPED | TTRS_FULL);
1842                        if (nr == 0)
1843                                nr = 1;
1844                        else if (nr < 0)
1845                                nr = -nr;
1846                        nr = ALIGN(offset + 1, nr) - offset;
1847                }
1848        }
1849
1850out:
1851        put_swap_device(si);
1852}
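
/*
 * Worked example of the advance logic above (editorial): if
 * __try_to_reclaim_swap() finds an order-3 folio (nr = 8) while
 * scanning offset 21, that folio occupies the aligned range [16, 24),
 * so ALIGN(22, 8) - 21 = 3 advances the scan straight to the next
 * boundary at offset 24.
 */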
1853
1854#ifdef CONFIG_HIBERNATION
1855
1856swp_entry_t get_swap_page_of_type(int type)
1857{
1858        struct swap_info_struct *si = swap_type_to_swap_info(type);
1859        unsigned long offset;
1860        swp_entry_t entry = {0};
1861
1862        if (!si)
1863                goto fail;
1864
1865        /* This is called for allocating a swap entry, not for the swap cache */
1866        if (get_swap_device_info(si)) {
1867                if (si->flags & SWP_WRITEOK) {
1868                        offset = cluster_alloc_swap_entry(si, 0, 1);
1869                        if (offset) {
1870                                entry = swp_entry(si->type, offset);
1871                                atomic_long_dec(&nr_swap_pages);
1872                        }
1873                }
1874                put_swap_device(si);
1875        }
1876fail:
1877        return entry;
1878}
1879
1880/*
1881 * Find the swap type that corresponds to the given device (if any).
1882 *
1883 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1884 * from 0, in which the swap header is expected to be located.
1885 *
1886 * This is needed for the suspend to disk (aka swsusp).
1887 */
1888int swap_type_of(dev_t device, sector_t offset)
1889{
1890        int type;
1891
1892        if (!device)
1893                return -1;
1894
1895        spin_lock(&swap_lock);
1896        for (type = 0; type < nr_swapfiles; type++) {
1897                struct swap_info_struct *sis = swap_info[type];
1898
1899                if (!(sis->flags & SWP_WRITEOK))
1900                        continue;
1901
1902                if (device == sis->bdev->bd_dev) {
1903                        struct swap_extent *se = first_se(sis);
1904
1905                        if (se->start_block == offset) {
1906                                spin_unlock(&swap_lock);
1907                                return type;
1908                        }
1909                }
1910        }
1911        spin_unlock(&swap_lock);
1912        return -ENODEV;
1913}
1914
1915int find_first_swap(dev_t *device)
1916{
1917        int type;
1918
1919        spin_lock(&swap_lock);
1920        for (type = 0; type < nr_swapfiles; type++) {
1921                struct swap_info_struct *sis = swap_info[type];
1922
1923                if (!(sis->flags & SWP_WRITEOK))
1924                        continue;
1925                *device = sis->bdev->bd_dev;
1926                spin_unlock(&swap_lock);
1927                return type;
1928        }
1929        spin_unlock(&swap_lock);
1930        return -ENODEV;
1931}
1932
1933/*
1934 * Get the (PAGE_SIZE) block corresponding to the given offset on the
1935 * swapdev corresponding to the given index in swap_info (swap type).
1936 */
1937sector_t swapdev_block(int type, pgoff_t offset)
1938{
1939        struct swap_info_struct *si = swap_type_to_swap_info(type);
1940        struct swap_extent *se;
1941
1942        if (!si || !(si->flags & SWP_WRITEOK))
1943                return 0;
1944        se = offset_to_swap_extent(si, offset);
1945        return se->start_block + (offset - se->start_page);
1946}
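
/*
 * For example (editorial): an extent with start_page == 0x100,
 * nr_pages == 0x200 and start_block == 0x8000 maps swap offset 0x180
 * to disk block 0x8000 + (0x180 - 0x100) == 0x8080.
 */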
1947
1948/*
1949 * Return either the total number of swap pages of the given type, or the
1950 * number of free pages of that type (depending on @free).
1951 *
1952 * This is needed for software suspend
1953 */
1954unsigned int count_swap_pages(int type, int free)
1955{
1956        unsigned int n = 0;
1957
1958        spin_lock(&swap_lock);
1959        if ((unsigned int)type < nr_swapfiles) {
1960                struct swap_info_struct *sis = swap_info[type];
1961
1962                spin_lock(&sis->lock);
1963                if (sis->flags & SWP_WRITEOK) {
1964                        n = sis->pages;
1965                        if (free)
1966                                n -= swap_usage_in_pages(sis);
1967                }
1968                spin_unlock(&sis->lock);
1969        }
1970        spin_unlock(&swap_lock);
1971        return n;
1972}
1973#endif /* CONFIG_HIBERNATION */
1974
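/*
 * Compare a pte against the swap pte for @swp_pte, ignoring swap pte
 * software bits (e.g. soft-dirty, uffd-wp or the exclusive marker)
 * that may be set without changing which swap entry the pte refers to.
 */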
1975static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1976{
1977        return pte_same(pte_swp_clear_flags(pte), swp_pte);
1978}
1979
1980/*
1981 * No need to decide whether this PTE shares the swap entry with others,
1982 * just let do_wp_page work it out if a write is requested later - to
1983 * force COW, vm_page_prot omits write permission from any private vma.
1984 */
1985static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1986                unsigned long addr, swp_entry_t entry, struct folio *folio)
1987{
1988        struct page *page;
1989        struct folio *swapcache;
1990        spinlock_t *ptl;
1991        pte_t *pte, new_pte, old_pte;
1992        bool hwpoisoned = false;
1993        int ret = 1;
1994
1995        swapcache = folio;
1996        folio = ksm_might_need_to_copy(folio, vma, addr);
1997        if (unlikely(!folio))
1998                return -ENOMEM;
1999        else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
2000                hwpoisoned = true;
2001                folio = swapcache;
2002        }
2003
2004        page = folio_file_page(folio, swp_offset(entry));
2005        if (PageHWPoison(page))
2006                hwpoisoned = true;
2007
2008        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
2009        if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
2010                                                swp_entry_to_pte(entry)))) {
2011                ret = 0;
2012                goto out;
2013        }
2014
2015        old_pte = ptep_get(pte);
2016
2017        if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
2018                swp_entry_t swp_entry;
2019
2020                dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2021                if (hwpoisoned) {
2022                        swp_entry = make_hwpoison_entry(page);
2023                } else {
2024                        swp_entry = make_poisoned_swp_entry();
2025                }
2026                new_pte = swp_entry_to_pte(swp_entry);
2027                ret = 0;
2028                goto setpte;
2029        }
2030
2031        /*
2032         * Some architectures may have to restore extra metadata to the page
2033         * when reading from swap. This metadata may be indexed by swap entry
2034         * so this must be called before swap_free().
2035         */
2036        arch_swap_restore(folio_swap(entry, folio), folio);
2037
2038        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2039        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
2040        folio_get(folio);
2041        if (folio == swapcache) {
2042                rmap_t rmap_flags = RMAP_NONE;
2043
2044                /*
2045                 * See do_swap_page(): writeback would be problematic.
2046                 * However, we do a folio_wait_writeback() just before this
2047                 * call and have the folio locked.
2048                 */
2049                VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2050                if (pte_swp_exclusive(old_pte))
2051                        rmap_flags |= RMAP_EXCLUSIVE;
2052                /*
2053                 * We currently only expect small !anon folios, which are either
2054                 * fully exclusive or fully shared. If we ever get large folios
2055                 * here, we have to be careful.
2056                 */
2057                if (!folio_test_anon(folio)) {
2058                        VM_WARN_ON_ONCE(folio_test_large(folio));
2059                        VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2060                        folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
2061                } else {
2062                        folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
2063                }
2064        } else { /* ksm created a completely new copy */
2065                folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
2066                folio_add_lru_vma(folio, vma);
2067        }
2068        new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
2069        if (pte_swp_soft_dirty(old_pte))
2070                new_pte = pte_mksoft_dirty(new_pte);
2071        if (pte_swp_uffd_wp(old_pte))
2072                new_pte = pte_mkuffd_wp(new_pte);
2073setpte:
2074        set_pte_at(vma->vm_mm, addr, pte, new_pte);
2075        swap_free(entry);
2076out:
2077        if (pte)
2078                pte_unmap_unlock(pte, ptl);
2079        if (folio != swapcache) {
2080                folio_unlock(folio);
2081                folio_put(folio);
2082        }
2083        return ret;
2084}
2085
2086static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2087                        unsigned long addr, unsigned long end,
2088                        unsigned int type)
2089{
2090        pte_t *pte = NULL;
2091        struct swap_info_struct *si;
2092
2093        si = swap_info[type];
2094        do {
2095                struct folio *folio;
2096                unsigned long offset;
2097                unsigned char swp_count;
2098                swp_entry_t entry;
2099                int ret;
2100                pte_t ptent;
2101
2102                if (!pte++) {
2103                        pte = pte_offset_map(pmd, addr);
2104                        if (!pte)
2105                                break;
2106                }
2107
2108                ptent = ptep_get_lockless(pte);
2109
2110                if (!is_swap_pte(ptent))
2111                        continue;
2112
2113                entry = pte_to_swp_entry(ptent);
2114                if (swp_type(entry) != type)
2115                        continue;
2116
2117                offset = swp_offset(entry);
2118                pte_unmap(pte);
2119                pte = NULL;
2120
2121                folio = swap_cache_get_folio(entry, vma, addr);
2122                if (!folio) {
2123                        struct vm_fault vmf = {
2124                                .vma = vma,
2125                                .address = addr,
2126                                .real_address = addr,
2127                                .pmd = pmd,
2128                        };
2129
2130                        folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2131                                                &vmf);
2132                }
2133                if (!folio) {
2134                        swp_count = READ_ONCE(si->swap_map[offset]);
2135                        if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
2136                                continue;
2137                        return -ENOMEM;
2138                }
2139
2140                folio_lock(folio);
2141                folio_wait_writeback(folio);
2142                ret = unuse_pte(vma, pmd, addr, entry, folio);
2143                if (ret < 0) {
2144                        folio_unlock(folio);
2145                        folio_put(folio);
2146                        return ret;
2147                }
2148
2149                folio_free_swap(folio);
2150                folio_unlock(folio);
2151                folio_put(folio);
2152        } while (addr += PAGE_SIZE, addr != end);
2153
2154        if (pte)
2155                pte_unmap(pte);
2156        return 0;
2157}
2158
2159static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2160                                unsigned long addr, unsigned long end,
2161                                unsigned int type)
2162{
2163        pmd_t *pmd;
2164        unsigned long next;
2165        int ret;
2166
2167        pmd = pmd_offset(pud, addr);
2168        do {
2169                cond_resched();
2170                next = pmd_addr_end(addr, end);
2171                ret = unuse_pte_range(vma, pmd, addr, next, type);
2172                if (ret)
2173                        return ret;
2174        } while (pmd++, addr = next, addr != end);
2175        return 0;
2176}
2177
2178static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2179                                unsigned long addr, unsigned long end,
2180                                unsigned int type)
2181{
2182        pud_t *pud;
2183        unsigned long next;
2184        int ret;
2185
2186        pud = pud_offset(p4d, addr);
2187        do {
2188                next = pud_addr_end(addr, end);
2189                if (pud_none_or_clear_bad(pud))
2190                        continue;
2191                ret = unuse_pmd_range(vma, pud, addr, next, type);
2192                if (ret)
2193                        return ret;
2194        } while (pud++, addr = next, addr != end);
2195        return 0;
2196}
2197
2198static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2199                                unsigned long addr, unsigned long end,
2200                                unsigned int type)
2201{
2202        p4d_t *p4d;
2203        unsigned long next;
2204        int ret;
2205
2206        p4d = p4d_offset(pgd, addr);
2207        do {
2208                next = p4d_addr_end(addr, end);
2209                if (p4d_none_or_clear_bad(p4d))
2210                        continue;
2211                ret = unuse_pud_range(vma, p4d, addr, next, type);
2212                if (ret)
2213                        return ret;
2214        } while (p4d++, addr = next, addr != end);
2215        return 0;
2216}
2217
2218static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
2219{
2220        pgd_t *pgd;
2221        unsigned long addr, end, next;
2222        int ret;
2223
2224        addr = vma->vm_start;
2225        end = vma->vm_end;
2226
2227        pgd = pgd_offset(vma->vm_mm, addr);
2228        do {
2229                next = pgd_addr_end(addr, end);
2230                if (pgd_none_or_clear_bad(pgd))
2231                        continue;
2232                ret = unuse_p4d_range(vma, pgd, addr, next, type);
2233                if (ret)
2234                        return ret;
2235        } while (pgd++, addr = next, addr != end);
2236        return 0;
2237}
2238
2239static int unuse_mm(struct mm_struct *mm, unsigned int type)
2240{
2241        struct vm_area_struct *vma;
2242        int ret = 0;
2243        VMA_ITERATOR(vmi, mm, 0);
2244
2245        mmap_read_lock(mm);
2246        for_each_vma(vmi, vma) {
2247                if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
2248                        ret = unuse_vma(vma, type);
2249                        if (ret)
2250                                break;
2251                }
2252
2253                cond_resched();
2254        }
2255        mmap_read_unlock(mm);
2256        return ret;
2257}
2258
2259/*
2260 * Scan swap_map from current position to next entry still in use.
2261 * Return 0 if there are no in-use entries after prev till the end
2262 * of the map.
2263 */
2264static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2265                                        unsigned int prev)
2266{
2267        unsigned int i;
2268        unsigned char count;
2269
2270        /*
2271         * No need for swap_lock here: we're just looking
2272         * for whether an entry is in use, not modifying it; false
2273         * hits are okay, and sys_swapoff() has already prevented new
2274         * allocations from this area (while holding swap_lock).
2275         */
2276        for (i = prev + 1; i < si->max; i++) {
2277                count = READ_ONCE(si->swap_map[i]);
2278                if (count && swap_count(count) != SWAP_MAP_BAD)
2279                        break;
2280                if ((i % LATENCY_LIMIT) == 0)
2281                        cond_resched();
2282        }
2283
2284        if (i == si->max)
2285                i = 0;
2286
2287        return i;
2288}
2289
2290static int try_to_unuse(unsigned int type)
2291{
2292        struct mm_struct *prev_mm;
2293        struct mm_struct *mm;
2294        struct list_head *p;
2295        int retval = 0;
2296        struct swap_info_struct *si = swap_info[type];
2297        struct folio *folio;
2298        swp_entry_t entry;
2299        unsigned int i;
2300
2301        if (!swap_usage_in_pages(si))
2302                goto success;
2303
2304retry:
2305        retval = shmem_unuse(type);
2306        if (retval)
2307                return retval;
2308
2309        prev_mm = &init_mm;
2310        mmget(prev_mm);
2311
2312        spin_lock(&mmlist_lock);
2313        p = &init_mm.mmlist;
2314        while (swap_usage_in_pages(si) &&
2315               !signal_pending(current) &&
2316               (p = p->next) != &init_mm.mmlist) {
2317
2318                mm = list_entry(p, struct mm_struct, mmlist);
2319                if (!mmget_not_zero(mm))
2320                        continue;
2321                spin_unlock(&mmlist_lock);
2322                mmput(prev_mm);
2323                prev_mm = mm;
2324                retval = unuse_mm(mm, type);
2325                if (retval) {
2326                        mmput(prev_mm);
2327                        return retval;
2328                }
2329
2330                /*
2331                 * Make sure that we aren't completely killing
2332                 * interactive performance.
2333                 */
2334                cond_resched();
2335                spin_lock(&mmlist_lock);
2336        }
2337        spin_unlock(&mmlist_lock);
2338
2339        mmput(prev_mm);
2340
2341        i = 0;
2342        while (swap_usage_in_pages(si) &&
2343               !signal_pending(current) &&
2344               (i = find_next_to_unuse(si, i)) != 0) {
2345
2346                entry = swp_entry(type, i);
2347                folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
2348                if (IS_ERR(folio))
2349                        continue;
2350
2351                /*
2352                 * It is conceivable that a racing task removed this folio from
2353                 * swap cache just before we acquired the page lock. The folio
2354                 * might even be back in swap cache on another swap area. But
2355                 * that is okay, folio_free_swap() only removes stale folios.
2356                 */
2357                folio_lock(folio);
2358                folio_wait_writeback(folio);
2359                folio_free_swap(folio);
2360                folio_unlock(folio);
2361                folio_put(folio);
2362        }
2363
2364        /*
2365         * Let's check again to see if there are still swap entries in the map.
2366         * If so, we need to retry the unuse logic.
2367         * Under global memory pressure, swap entries can be reinserted back
2368         * into process space after the mmlist loop above passes over them.
2369         *
2370         * Limit the number of retries? No: when mmget_not_zero()
2371         * above fails, that mm is likely to be freeing swap from
2372         * exit_mmap(), which proceeds at its own independent pace;
2373         * and even shmem_writeout() could have been preempted after
2374         * folio_alloc_swap(), temporarily hiding that swap.  It's easy
2375         * and robust (though cpu-intensive) just to keep retrying.
2376         */
2377        if (swap_usage_in_pages(si)) {
2378                if (!signal_pending(current))
2379                        goto retry;
2380                return -EINTR;
2381        }
2382
2383success:
2384        /*
2385         * Make sure that further cleanups after try_to_unuse() returns happen
2386         * after swap_range_free() reduces si->inuse_pages to 0.
2387         */
2388        smp_mb();
2389        return 0;
2390}
2391
2392/*
2393 * After a successful try_to_unuse, if no swap is now in use, we know
2394 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2395 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2396 * added to the mmlist just after swap_duplicate() - before would be racy.
2397 */
2398static void drain_mmlist(void)
2399{
2400        struct list_head *p, *next;
2401        unsigned int type;
2402
2403        for (type = 0; type < nr_swapfiles; type++)
2404                if (swap_usage_in_pages(swap_info[type]))
2405                        return;
2406        spin_lock(&mmlist_lock);
2407        list_for_each_safe(p, next, &init_mm.mmlist)
2408                list_del_init(p);
2409        spin_unlock(&mmlist_lock);
2410}
2411
2412/*
2413 * Free all of a swapdev's extent information
2414 */
2415static void destroy_swap_extents(struct swap_info_struct *sis)
2416{
2417        while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2418                struct rb_node *rb = sis->swap_extent_root.rb_node;
2419                struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2420
2421                rb_erase(rb, &sis->swap_extent_root);
2422                kfree(se);
2423        }
2424
2425        if (sis->flags & SWP_ACTIVATED) {
2426                struct file *swap_file = sis->swap_file;
2427                struct address_space *mapping = swap_file->f_mapping;
2428
2429                sis->flags &= ~SWP_ACTIVATED;
2430                if (mapping->a_ops->swap_deactivate)
2431                        mapping->a_ops->swap_deactivate(swap_file);
2432        }
2433}
2434
2435/*
2436 * Add a block range (and the corresponding page range) into this swapdev's
2437 * extent tree.
2438 *
2439 * This function rather assumes that it is called in ascending page order.
2440 */
2441int
2442add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2443                unsigned long nr_pages, sector_t start_block)
2444{
2445        struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2446        struct swap_extent *se;
2447        struct swap_extent *new_se;
2448
2449        /*
2450         * Place the new node at the rightmost position, since the
2451         * function is called in ascending page order.
2452         */
2453        while (*link) {
2454                parent = *link;
2455                link = &parent->rb_right;
2456        }
2457
2458        if (parent) {
2459                se = rb_entry(parent, struct swap_extent, rb_node);
2460                BUG_ON(se->start_page + se->nr_pages != start_page);
2461                if (se->start_block + se->nr_pages == start_block) {
2462                        /* Merge it */
2463                        se->nr_pages += nr_pages;
2464                        return 0;
2465                }
2466        }
2467
2468        /* No merge, insert a new extent. */
2469        new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2470        if (new_se == NULL)
2471                return -ENOMEM;
2472        new_se->start_page = start_page;
2473        new_se->nr_pages = nr_pages;
2474        new_se->start_block = start_block;
2475
2476        rb_link_node(&new_se->rb_node, parent, link);
2477        rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2478        return 1;
2479}
2480EXPORT_SYMBOL_GPL(add_swap_extent);
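
/*
 * For example (editorial): after add_swap_extent(sis, 0, 16, 100), a
 * further add_swap_extent(sis, 16, 16, 116) merges into the rightmost
 * extent (100 + 16 == 116), leaving one 32-page extent, whereas
 * add_swap_extent(sis, 32, 16, 200) would insert a new node.
 */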
2481
2482/*
2483 * A `swap extent' is a simple thing which maps a contiguous range of pages
2484 * onto a contiguous range of disk blocks.  A rbtree of swap extents is
2485 * built at swapon time and is then used at swap_writepage/swap_read_folio
2486 * time for locating where on disk a page belongs.
2487 *
2488 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2489 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2490 * swap files identically.
2491 *
2492 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2493 * extent rbtree operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2494 * swapfiles are handled *identically* after swapon time.
2495 *
2496 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2497 * and will parse them into a rbtree, in PAGE_SIZE chunks.  If some stray
2498 * blocks are found which do not fall within the PAGE_SIZE alignment
2499 * requirements, they are simply tossed out - we will never use those blocks
2500 * for swapping.
2501 *
2502 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2503 * prevents users from writing to the swap device, which will corrupt memory.
2504 *
2505 * The amount of disk space which a single swap extent represents varies.
2506 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2507 * extents in the rbtree. - akpm.
2508 */
2509static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2510{
2511        struct file *swap_file = sis->swap_file;
2512        struct address_space *mapping = swap_file->f_mapping;
2513        struct inode *inode = mapping->host;
2514        int ret;
2515
2516        if (S_ISBLK(inode->i_mode)) {
2517                ret = add_swap_extent(sis, 0, sis->max, 0);
2518                *span = sis->pages;
2519                return ret;
2520        }
2521
2522        if (mapping->a_ops->swap_activate) {
2523                ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2524                if (ret < 0)
2525                        return ret;
2526                sis->flags |= SWP_ACTIVATED;
2527                if ((sis->flags & SWP_FS_OPS) &&
2528                    sio_pool_init() != 0) {
2529                        destroy_swap_extents(sis);
2530                        return -ENOMEM;
2531                }
2532                return ret;
2533        }
2534
2535        return generic_swapfile_activate(sis, swap_file, span);
2536}
2537
2538static int swap_node(struct swap_info_struct *si)
2539{
2540        struct block_device *bdev;
2541
2542        if (si->bdev)
2543                bdev = si->bdev;
2544        else
2545                bdev = si->swap_file->f_inode->i_sb->s_bdev;
2546
2547        return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2548}
2549
2550static void setup_swap_info(struct swap_info_struct *si, int prio,
2551                            unsigned char *swap_map,
2552                            struct swap_cluster_info *cluster_info,
2553                            unsigned long *zeromap)
2554{
2555        int i;
2556
2557        if (prio >= 0)
2558                si->prio = prio;
2559        else
2560                si->prio = --least_priority;
2561        /*
2562         * the plist prio is negated because plist ordering is
2563         * low-to-high, while swap ordering is high-to-low
2564         */
2565        si->list.prio = -si->prio;
2566        for_each_node(i) {
2567                if (si->prio >= 0)
2568                        si->avail_lists[i].prio = -si->prio;
2569                else {
2570                        if (swap_node(si) == i)
2571                                si->avail_lists[i].prio = 1;
2572                        else
2573                                si->avail_lists[i].prio = -si->prio;
2574                }
2575        }
2576        si->swap_map = swap_map;
2577        si->cluster_info = cluster_info;
2578        si->zeromap = zeromap;
2579}
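
/*
 * For example (editorial): a swap area with user-assigned prio 5 gets
 * plist priority -5 everywhere, while one with auto-assigned prio -2
 * gets avail_lists[nid].prio == 2 on remote nodes but 1 on its backing
 * device's node, so allocations prefer node-local swap.
 */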
2580
2581static void _enable_swap_info(struct swap_info_struct *si)
2582{
2583        atomic_long_add(si->pages, &nr_swap_pages);
2584        total_swap_pages += si->pages;
2585
2586        assert_spin_locked(&swap_lock);
2587        /*
2588         * both lists are plists, and thus priority ordered.
2589         * swap_active_head needs to be priority ordered for swapoff(),
2590         * which on removal of any swap_info_struct with an auto-assigned
2591         * (i.e. negative) priority increments the auto-assigned priority
2592         * of any lower-priority swap_info_structs.
2593         * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2594         * which allocates swap pages from the highest available priority
2595         * swap_info_struct.
2596         */
2597        plist_add(&si->list, &swap_active_head);
2598
2599        /* Add back to available list */
2600        add_to_avail_list(si, true);
2601}
2602
2603static void enable_swap_info(struct swap_info_struct *si, int prio,
2604                                unsigned char *swap_map,
2605                                struct swap_cluster_info *cluster_info,
2606                                unsigned long *zeromap)
2607{
2608        spin_lock(&swap_lock);
2609        spin_lock(&si->lock);
2610        setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
2611        spin_unlock(&si->lock);
2612        spin_unlock(&swap_lock);
2613        /*
2614         * Finished initializing swap device, now it's safe to reference it.
2615         */
2616        percpu_ref_resurrect(&si->users);
2617        spin_lock(&swap_lock);
2618        spin_lock(&si->lock);
2619        _enable_swap_info(si);
2620        spin_unlock(&si->lock);
2621        spin_unlock(&swap_lock);
2622}
2623
2624static void reinsert_swap_info(struct swap_info_struct *si)
2625{
2626        spin_lock(&swap_lock);
2627        spin_lock(&si->lock);
2628        setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
2629        _enable_swap_info(si);
2630        spin_unlock(&si->lock);
2631        spin_unlock(&swap_lock);
2632}
2633
2634/*
2635 * Called after clearing SWP_WRITEOK; ensures cluster_alloc_range()
2636 * sees the updated flags, so there will be no more allocations.
2637 */
2638static void wait_for_allocation(struct swap_info_struct *si)
2639{
2640        unsigned long offset;
2641        unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
2642        struct swap_cluster_info *ci;
2643
2644        BUG_ON(si->flags & SWP_WRITEOK);
2645
2646        for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
2647                ci = lock_cluster(si, offset);
2648                unlock_cluster(ci);
2649        }
2650}
2651
2652/*
2653 * Called after the swap device's reference count is dead, so
2654 * neither scan nor allocation will use it.
2655 */
2656static void flush_percpu_swap_cluster(struct swap_info_struct *si)
2657{
2658        int cpu, i;
2659        struct swap_info_struct **pcp_si;
2660
2661        for_each_possible_cpu(cpu) {
2662                pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu);
2663                /*
2664                 * Invalidate the percpu swap cluster cache: si->users
2665                 * is dead, so no new user will point to it; just flush
2666                 * any existing user.
2667                 */
2668                for (i = 0; i < SWAP_NR_ORDERS; i++)
2669                        cmpxchg(&pcp_si[i], si, NULL);
2670        }
2671}
2672
2673
2674SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2675{
2676        struct swap_info_struct *p = NULL;
2677        unsigned char *swap_map;
2678        unsigned long *zeromap;
2679        struct swap_cluster_info *cluster_info;
2680        struct file *swap_file, *victim;
2681        struct address_space *mapping;
2682        struct inode *inode;
2683        struct filename *pathname;
2684        int err, found = 0;
2685
2686        if (!capable(CAP_SYS_ADMIN))
2687                return -EPERM;
2688
2689        BUG_ON(!current->mm);
2690
2691        pathname = getname(specialfile);
2692        if (IS_ERR(pathname))
2693                return PTR_ERR(pathname);
2694
2695        victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2696        err = PTR_ERR(victim);
2697        if (IS_ERR(victim))
2698                goto out;
2699
2700        mapping = victim->f_mapping;
2701        spin_lock(&swap_lock);
2702        plist_for_each_entry(p, &swap_active_head, list) {
2703                if (p->flags & SWP_WRITEOK) {
2704                        if (p->swap_file->f_mapping == mapping) {
2705                                found = 1;
2706                                break;
2707                        }
2708                }
2709        }
2710        if (!found) {
2711                err = -EINVAL;
2712                spin_unlock(&swap_lock);
2713                goto out_dput;
2714        }
2715        if (!security_vm_enough_memory_mm(current->mm, p->pages))
2716                vm_unacct_memory(p->pages);
2717        else {
2718                err = -ENOMEM;
2719                spin_unlock(&swap_lock);
2720                goto out_dput;
2721        }
2722        spin_lock(&p->lock);
2723        del_from_avail_list(p, true);
2724        if (p->prio < 0) {
2725                struct swap_info_struct *si = p;
2726                int nid;
2727
2728                plist_for_each_entry_continue(si, &swap_active_head, list) {
2729                        si->prio++;
2730                        si->list.prio--;
2731                        for_each_node(nid) {
2732                                if (si->avail_lists[nid].prio != 1)
2733                                        si->avail_lists[nid].prio--;
2734                        }
2735                }
2736                least_priority++;
2737        }
2738        plist_del(&p->list, &swap_active_head);
2739        atomic_long_sub(p->pages, &nr_swap_pages);
2740        total_swap_pages -= p->pages;
2741        spin_unlock(&p->lock);
2742        spin_unlock(&swap_lock);
2743
2744        wait_for_allocation(p);
2745
2746        set_current_oom_origin();
2747        err = try_to_unuse(p->type);
2748        clear_current_oom_origin();
2749
2750        if (err) {
2751                /* re-insert swap space back into swap_list */
2752                reinsert_swap_info(p);
2753                goto out_dput;
2754        }
2755
2756        /*
2757         * Wait for swap operations protected by get/put_swap_device()
2758         * to complete.  Because of synchronize_rcu() here, all swap
2759         * operations protected by the RCU reader side lock (including
2760         * any spinlock) will be waited for too.  This makes it easy to
2761         * prevent folio_test_swapcache() and the following swap cache
2762         * operations from racing with swapoff.
2763         */
2764        percpu_ref_kill(&p->users);
2765        synchronize_rcu();
2766        wait_for_completion(&p->comp);
2767
2768        flush_work(&p->discard_work);
2769        flush_work(&p->reclaim_work);
2770        flush_percpu_swap_cluster(p);
2771
2772        destroy_swap_extents(p);
2773        if (p->flags & SWP_CONTINUED)
2774                free_swap_count_continuations(p);
2775
2776        if (!p->bdev || !bdev_nonrot(p->bdev))
2777                atomic_dec(&nr_rotate_swap);
2778
2779        mutex_lock(&swapon_mutex);
2780        spin_lock(&swap_lock);
2781        spin_lock(&p->lock);
2782        drain_mmlist();
2783
2784        swap_file = p->swap_file;
2785        p->swap_file = NULL;
2786        p->max = 0;
2787        swap_map = p->swap_map;
2788        p->swap_map = NULL;
2789        zeromap = p->zeromap;
2790        p->zeromap = NULL;
2791        cluster_info = p->cluster_info;
2792        p->cluster_info = NULL;
2793        spin_unlock(&p->lock);
2794        spin_unlock(&swap_lock);
2795        arch_swap_invalidate_area(p->type);
2796        zswap_swapoff(p->type);
2797        mutex_unlock(&swapon_mutex);
2798        kfree(p->global_cluster);
2799        p->global_cluster = NULL;
2800        vfree(swap_map);
2801        kvfree(zeromap);
2802        kvfree(cluster_info);
2803        /* Destroy swap account information */
2804        swap_cgroup_swapoff(p->type);
2805        exit_swap_address_space(p->type);
2806
2807        inode = mapping->host;
2808
2809        inode_lock(inode);
2810        inode->i_flags &= ~S_SWAPFILE;
2811        inode_unlock(inode);
2812        filp_close(swap_file, NULL);
2813
2814        /*
2815         * Clear the SWP_USED flag after all resources are freed so that swapon
2816         * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2817         * not hold p->lock after we cleared its SWP_WRITEOK.
2818         */
2819        spin_lock(&swap_lock);
2820        p->flags = 0;
2821        spin_unlock(&swap_lock);
2822
2823        err = 0;
2824        atomic_inc(&proc_poll_event);
2825        wake_up_interruptible(&proc_poll_wait);
2826
2827out_dput:
2828        filp_close(victim, NULL);
2829out:
2830        putname(pathname);
2831        return err;
2832}
2833
2834#ifdef CONFIG_PROC_FS
2835static __poll_t swaps_poll(struct file *file, poll_table *wait)
2836{
2837        struct seq_file *seq = file->private_data;
2838
2839        poll_wait(file, &proc_poll_wait, wait);
2840
2841        if (seq->poll_event != atomic_read(&proc_poll_event)) {
2842                seq->poll_event = atomic_read(&proc_poll_event);
2843                return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2844        }
2845
2846        return EPOLLIN | EPOLLRDNORM;
2847}
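
/*
 * Userspace sketch (editorial; assumes only standard poll(2)): a
 * monitor can open /proc/swaps and wait for swapon/swapoff events,
 * which are reported as POLLERR | POLLPRI by the poll handler above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLERR | POLLPRI))
 *			... seek to 0 and re-read /proc/swaps ...
 *	}
 */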
2848
2849/* iterator */
2850static void *swap_start(struct seq_file *swap, loff_t *pos)
2851{
2852        struct swap_info_struct *si;
2853        int type;
2854        loff_t l = *pos;
2855
2856        mutex_lock(&swapon_mutex);
2857
2858        if (!l)
2859                return SEQ_START_TOKEN;
2860
2861        for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2862                if (!(si->flags & SWP_USED) || !si->swap_map)
2863                        continue;
2864                if (!--l)
2865                        return si;
2866        }
2867
2868        return NULL;
2869}
2870
2871static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2872{
2873        struct swap_info_struct *si = v;
2874        int type;
2875
2876        if (v == SEQ_START_TOKEN)
2877                type = 0;
2878        else
2879                type = si->type + 1;
2880
2881        ++(*pos);
2882        for (; (si = swap_type_to_swap_info(type)); type++) {
2883                if (!(si->flags & SWP_USED) || !si->swap_map)
2884                        continue;
2885                return si;
2886        }
2887
2888        return NULL;
2889}
2890
2891static void swap_stop(struct seq_file *swap, void *v)
2892{
2893        mutex_unlock(&swapon_mutex);
2894}
2895
2896static int swap_show(struct seq_file *swap, void *v)
2897{
2898        struct swap_info_struct *si = v;
2899        struct file *file;
2900        int len;
2901        unsigned long bytes, inuse;
2902
2903        if (si == SEQ_START_TOKEN) {
2904                seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2905                return 0;
2906        }
2907
2908        bytes = K(si->pages);
2909        inuse = K(swap_usage_in_pages(si));
2910
2911        file = si->swap_file;
2912        len = seq_file_path(swap, file, " \t\n\\");
2913        seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2914                        len < 40 ? 40 - len : 1, " ",
2915                        S_ISBLK(file_inode(file)->i_mode) ?
2916                                "partition" : "file\t",
2917                        bytes, bytes < 10000000 ? "\t" : "",
2918                        inuse, inuse < 10000000 ? "\t" : "",
2919                        si->prio);
2920        return 0;
2921}
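
/*
 * Illustrative /proc/swaps output (editorial; values invented):
 *
 *	Filename                                Type            Size            Used            Priority
 *	/dev/sda2                               partition       8388604         1024            -2
 *	/swapfile                               file            2097148         0               -3
 */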
2922
2923static const struct seq_operations swaps_op = {
2924        .start =        swap_start,
2925        .next =         swap_next,
2926        .stop =         swap_stop,
2927        .show =         swap_show
2928};
2929
2930static int swaps_open(struct inode *inode, struct file *file)
2931{
2932        struct seq_file *seq;
2933        int ret;
2934
2935        ret = seq_open(file, &swaps_op);
2936        if (ret)
2937                return ret;
2938
2939        seq = file->private_data;
2940        seq->poll_event = atomic_read(&proc_poll_event);
2941        return 0;
2942}
2943
2944static const struct proc_ops swaps_proc_ops = {
2945        .proc_flags     = PROC_ENTRY_PERMANENT,
2946        .proc_open      = swaps_open,
2947        .proc_read      = seq_read,
2948        .proc_lseek     = seq_lseek,
2949        .proc_release   = seq_release,
2950        .proc_poll      = swaps_poll,
2951};
2952
2953static int __init procswaps_init(void)
2954{
2955        proc_create("swaps", 0, NULL, &swaps_proc_ops);
2956        return 0;
2957}
2958__initcall(procswaps_init);
2959#endif /* CONFIG_PROC_FS */
2960
2961#ifdef MAX_SWAPFILES_CHECK
2962static int __init max_swapfiles_check(void)
2963{
2964        MAX_SWAPFILES_CHECK();
2965        return 0;
2966}
2967late_initcall(max_swapfiles_check);
2968#endif
2969
2970static struct swap_info_struct *alloc_swap_info(void)
2971{
2972        struct swap_info_struct *p;
2973        struct swap_info_struct *defer = NULL;
2974        unsigned int type;
2975        int i;
2976
2977        p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2978        if (!p)
2979                return ERR_PTR(-ENOMEM);
2980
2981        if (percpu_ref_init(&p->users, swap_users_ref_free,
2982                            PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2983                kvfree(p);
2984                return ERR_PTR(-ENOMEM);
2985        }
2986
2987        spin_lock(&swap_lock);
2988        for (type = 0; type < nr_swapfiles; type++) {
2989                if (!(swap_info[type]->flags & SWP_USED))
2990                        break;
2991        }
2992        if (type >= MAX_SWAPFILES) {
2993                spin_unlock(&swap_lock);
2994                percpu_ref_exit(&p->users);
2995                kvfree(p);
2996                return ERR_PTR(-EPERM);
2997        }
2998        if (type >= nr_swapfiles) {
2999                p->type = type;
3000                /*
3001                 * Publish the swap_info_struct after initializing it.
3002                 * Note that kvzalloc() above zeroes all its fields.
3003                 */
3004                smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
3005                nr_swapfiles++;
3006        } else {
3007                defer = p;
3008                p = swap_info[type];
3009                /*
3010                 * Do not memset this entry: a racing procfs swap_next()
3011                 * would be relying on p->type to remain valid.
3012                 */
3013        }
3014        p->swap_extent_root = RB_ROOT;
3015        plist_node_init(&p->list, 0);
3016        for_each_node(i)
3017                plist_node_init(&p->avail_lists[i], 0);
3018        p->flags = SWP_USED;
3019        spin_unlock(&swap_lock);
3020        if (defer) {
3021                percpu_ref_exit(&defer->users);
3022                kvfree(defer);
3023        }
3024        spin_lock_init(&p->lock);
3025        spin_lock_init(&p->cont_lock);
3026        atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
3027        init_completion(&p->comp);
3028
3029        return p;
3030}
3031
3032static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
3033{
3034        if (S_ISBLK(inode->i_mode)) {
3035                si->bdev = I_BDEV(inode);
3036                /*
3037                 * Zoned block devices contain zones with a sequential-write-only
3038                 * restriction, which makes them unsuitable for swapping.
3039                 * Disallow them here.
3040                 */
3041                if (bdev_is_zoned(si->bdev))
3042                        return -EINVAL;
3043                si->flags |= SWP_BLKDEV;
3044        } else if (S_ISREG(inode->i_mode)) {
3045                si->bdev = inode->i_sb->s_bdev;
3046        }
3047
3048        return 0;
3049}
3050
3051
3052/*
3053 * Find out how many pages are allowed for a single swap device. There
3054 * are two limiting factors:
3055 * 1) the number of bits for the swap offset in the swp_entry_t type, and
3056 * 2) the number of bits in the swap pte, as defined by the different
3057 * architectures.
3058 *
3059 * In order to find the largest possible bit mask, a swap entry with
3060 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3061 * decoded to a swp_entry_t again, and finally the swap offset is
3062 * extracted.
3063 *
3064 * This will mask all the bits from the initial ~0UL mask that can't
3065 * be encoded in either the swp_entry_t or the architecture definition
3066 * of a swap pte.
3067 */
3068unsigned long generic_max_swapfile_size(void)
3069{
3070        return swp_offset(pte_to_swp_entry(
3071                        swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
3072}
3073
3074/* Can be overridden by an architecture for additional checks. */
3075__weak unsigned long arch_max_swapfile_size(void)
3076{
3077        return generic_max_swapfile_size();
3078}
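
/*
 * Hedged example: an architecture can clamp the limit below the generic
 * round-trip value when high swap pte bits are reserved (x86 does this
 * for the L1TF mitigation).  A hypothetical override might read:
 *
 *	unsigned long arch_max_swapfile_size(void)
 *	{
 *		unsigned long pages = generic_max_swapfile_size();
 *
 *		return min(pages, 1UL << 45);	(45 is an invented bound)
 *	}
 */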
3079
3080static unsigned long read_swap_header(struct swap_info_struct *si,
3081                                        union swap_header *swap_header,
3082                                        struct inode *inode)
3083{
3084        int i;
3085        unsigned long maxpages;
3086        unsigned long swapfilepages;
3087        unsigned long last_page;
3088
3089        if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3090                pr_err("Unable to find swap-space signature\n");
3091                return 0;
3092        }
3093
3094        /* swap partition endianness hack... */
3095        if (swab32(swap_header->info.version) == 1) {
3096                swab32s(&swap_header->info.version);
3097                swab32s(&swap_header->info.last_page);
3098                swab32s(&swap_header->info.nr_badpages);
3099                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3100                        return 0;
3101                for (i = 0; i < swap_header->info.nr_badpages; i++)
3102                        swab32s(&swap_header->info.badpages[i]);
3103        }
3104        /* Check the swap header's sub-version */
3105        if (swap_header->info.version != 1) {
3106                pr_warn("Unable to handle swap header version %d\n",
3107                        swap_header->info.version);
3108                return 0;
3109        }
3110
3111        maxpages = swapfile_maximum_size;
3112        last_page = swap_header->info.last_page;
3113        if (!last_page) {
3114                pr_warn("Empty swap-file\n");
3115                return 0;
3116        }
3117        if (last_page > maxpages) {
3118                pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3119                        K(maxpages), K(last_page));
3120        }
3121        if (maxpages > last_page) {
3122                maxpages = last_page + 1;
3123                /* p->max is an unsigned int: don't overflow it */
3124                if ((unsigned int)maxpages == 0)
3125                        maxpages = UINT_MAX;
3126        }
3127
3128        if (!maxpages)
3129                return 0;
3130        swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3131        if (swapfilepages && maxpages > swapfilepages) {
3132                pr_warn("Swap area shorter than signature indicates\n");
3133                return 0;
3134        }
3135        if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3136                return 0;
3137        if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3138                return 0;
3139
3140        return maxpages;
3141}
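
/*
 * For orientation, the on-disk format parsed above is (abridged from
 * union swap_header in include/linux/swap.h):
 *
 *	struct {
 *		char		bootbits[1024];	(space for disklabel etc.)
 *		__u32		version;
 *		__u32		last_page;
 *		__u32		nr_badpages;
 *		unsigned char	sws_uuid[16];
 *		unsigned char	sws_volume[16];
 *		__u32		padding[117];
 *		__u32		badpages[1];
 *	} info;
 *
 * with the 10-byte "SWAPSPACE2" magic occupying the last bytes of the
 * first page.
 */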
3142
3143static int setup_swap_map(struct swap_info_struct *si,
3144                          union swap_header *swap_header,
3145                          unsigned char *swap_map,
3146                          unsigned long maxpages)
3147{
3148        unsigned long i;
3149
3150        swap_map[0] = SWAP_MAP_BAD; /* omit header page */
3151        for (i = 0; i < swap_header->info.nr_badpages; i++) {
3152                unsigned int page_nr = swap_header->info.badpages[i];
3153                if (page_nr == 0 || page_nr > swap_header->info.last_page)
3154                        return -EINVAL;
3155                if (page_nr < maxpages) {
3156                        swap_map[page_nr] = SWAP_MAP_BAD;
3157                        si->pages--;
3158                }
3159        }
3160
3161        if (!si->pages) {
3162                pr_warn("Empty swap-file\n");
3163                return -EINVAL;
3164        }
3165
3166        return 0;
3167}
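
/*
 * Worked example: with maxpages == 1024 and badpages == {1, 700}, the
 * map above ends up with entries 0 (header), 1 and 700 set to
 * SWAP_MAP_BAD, and si->pages (initialised to maxpages - 1 == 1023 by
 * the caller) drops to 1021 usable entries.
 */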
3168
3169#define SWAP_CLUSTER_INFO_COLS                                          \
3170        DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3171#define SWAP_CLUSTER_SPACE_COLS                                         \
3172        DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3173#define SWAP_CLUSTER_COLS                                               \
3174        max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
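
/*
 * Illustrative arithmetic (the real values depend on the architecture
 * and config): with L1_CACHE_BYTES == 64 and
 * sizeof(struct swap_cluster_info) == 24, SWAP_CLUSTER_INFO_COLS is
 * DIV_ROUND_UP(64, 24) == 3; with SWAP_ADDRESS_SPACE_PAGES == 16384 and
 * SWAPFILE_CLUSTER == 512, SWAP_CLUSTER_SPACE_COLS is 32, giving
 * SWAP_CLUSTER_COLS == max(3, 32) == 32.
 */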
3175
3176static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
3177                                                union swap_header *swap_header,
3178                                                unsigned long maxpages)
3179{
3180        unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3181        struct swap_cluster_info *cluster_info;
3182        unsigned long i, j, idx;
3183        int err = -ENOMEM;
3184
3185        cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
3186        if (!cluster_info)
3187                goto err;
3188
3189        for (i = 0; i < nr_clusters; i++)
3190                spin_lock_init(&cluster_info[i].lock);
3191
3192        if (!(si->flags & SWP_SOLIDSTATE)) {
3193                si->global_cluster = kmalloc(sizeof(*si->global_cluster),
3194                                     GFP_KERNEL);
3195                if (!si->global_cluster)
3196                        goto err_free;
3197                for (i = 0; i < SWAP_NR_ORDERS; i++)
3198                        si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
3199                spin_lock_init(&si->global_cluster_lock);
3200        }
3201
3202        /*
3203         * Mark unusable pages as unavailable. The clusters aren't
3204         * marked free yet, so no list operations are involved yet.
3205         *
3206         * See setup_swap_map(): header page, bad pages,
3207         * and the EOF part of the last cluster.
3208         */
3209        inc_cluster_info_page(si, cluster_info, 0);
3210        for (i = 0; i < swap_header->info.nr_badpages; i++) {
3211                unsigned int page_nr = swap_header->info.badpages[i];
3212
3213                if (page_nr >= maxpages)
3214                        continue;
3215                inc_cluster_info_page(si, cluster_info, page_nr);
3216        }
3217        for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3218                inc_cluster_info_page(si, cluster_info, i);
3219
3220        INIT_LIST_HEAD(&si->free_clusters);
3221        INIT_LIST_HEAD(&si->full_clusters);
3222        INIT_LIST_HEAD(&si->discard_clusters);
3223
3224        for (i = 0; i < SWAP_NR_ORDERS; i++) {
3225                INIT_LIST_HEAD(&si->nonfull_clusters[i]);
3226                INIT_LIST_HEAD(&si->frag_clusters[i]);
3227                atomic_long_set(&si->frag_cluster_nr[i], 0);
3228        }
3229
3230        /*
3231         * Reduce false cache line sharing between cluster_info entries
3232         * that serve the same swap address space.
3233         */
3234        for (j = 0; j < SWAP_CLUSTER_COLS; j++) {
3235                for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3236                        struct swap_cluster_info *ci;
3237                        idx = i * SWAP_CLUSTER_COLS + j;
3238                        ci = cluster_info + idx;
3239                        if (idx >= nr_clusters)
3240                                continue;
3241                        if (ci->count) {
3242                                ci->flags = CLUSTER_FLAG_NONFULL;
3243                                list_add_tail(&ci->list, &si->nonfull_clusters[0]);
3244                                continue;
3245                        }
3246                        ci->flags = CLUSTER_FLAG_FREE;
3247                        list_add_tail(&ci->list, &si->free_clusters);
3248                }
3249        }
3250
3251        return cluster_info;
3252
3253err_free:
3254        kvfree(cluster_info);
3255err:
3256        return ERR_PTR(err);
3257}
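
/*
 * The column-major walk above interleaves list order on purpose: with,
 * say, nr_clusters == 100 and SWAP_CLUSTER_COLS == 32 (hypothetical
 * numbers), clusters are appended as 0, 32, 64, 96, 1, 33, 65, 97, 2,
 * ... so that clusters adjacent on the lists do not share a cache line
 * of cluster_info[].
 */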
3258
3259SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3260{
3261        struct swap_info_struct *si;
3262        struct filename *name;
3263        struct file *swap_file = NULL;
3264        struct address_space *mapping;
3265        struct dentry *dentry;
3266        int prio;
3267        int error;
3268        union swap_header *swap_header;
3269        int nr_extents;
3270        sector_t span;
3271        unsigned long maxpages;
3272        unsigned char *swap_map = NULL;
3273        unsigned long *zeromap = NULL;
3274        struct swap_cluster_info *cluster_info = NULL;
3275        struct folio *folio = NULL;
3276        struct inode *inode = NULL;
3277        bool inced_nr_rotate_swap = false;
3278
3279        if (swap_flags & ~SWAP_FLAGS_VALID)
3280                return -EINVAL;
3281
3282        if (!capable(CAP_SYS_ADMIN))
3283                return -EPERM;
3284
3285        if (!swap_avail_heads)
3286                return -ENOMEM;
3287
3288        si = alloc_swap_info();
3289        if (IS_ERR(si))
3290                return PTR_ERR(si);
3291
3292        INIT_WORK(&si->discard_work, swap_discard_work);
3293        INIT_WORK(&si->reclaim_work, swap_reclaim_work);
3294
3295        name = getname(specialfile);
3296        if (IS_ERR(name)) {
3297                error = PTR_ERR(name);
3298                name = NULL;
3299                goto bad_swap;
3300        }
3301        swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
3302        if (IS_ERR(swap_file)) {
3303                error = PTR_ERR(swap_file);
3304                swap_file = NULL;
3305                goto bad_swap;
3306        }
3307
3308        si->swap_file = swap_file;
3309        mapping = swap_file->f_mapping;
3310        dentry = swap_file->f_path.dentry;
3311        inode = mapping->host;
3312
3313        error = claim_swapfile(si, inode);
3314        if (unlikely(error))
3315                goto bad_swap;
3316
3317        inode_lock(inode);
3318        if (d_unlinked(dentry) || cant_mount(dentry)) {
3319                error = -ENOENT;
3320                goto bad_swap_unlock_inode;
3321        }
3322        if (IS_SWAPFILE(inode)) {
3323                error = -EBUSY;
3324                goto bad_swap_unlock_inode;
3325        }
3326
3327        /*
3328         * The swap subsystem needs a major overhaul to support mappings
3329         * with a minimum folio order.  That doesn't work yet, so disable it.
3330         */
3331        if (mapping_min_folio_order(mapping) > 0) {
3332                error = -EINVAL;
3333                goto bad_swap_unlock_inode;
3334        }
3335
3336        /*
3337         * Read the swap header.
3338         */
3339        if (!mapping->a_ops->read_folio) {
3340                error = -EINVAL;
3341                goto bad_swap_unlock_inode;
3342        }
3343        folio = read_mapping_folio(mapping, 0, swap_file);
3344        if (IS_ERR(folio)) {
3345                error = PTR_ERR(folio);
3346                goto bad_swap_unlock_inode;
3347        }
3348        swap_header = kmap_local_folio(folio, 0);
3349
3350        maxpages = read_swap_header(si, swap_header, inode);
3351        if (unlikely(!maxpages)) {
3352                error = -EINVAL;
3353                goto bad_swap_unlock_inode;
3354        }
3355
3356        si->max = maxpages;
3357        si->pages = maxpages - 1;
3358        nr_extents = setup_swap_extents(si, &span);
3359        if (nr_extents < 0) {
3360                error = nr_extents;
3361                goto bad_swap_unlock_inode;
3362        }
3363        if (si->pages != si->max - 1) {
3364                pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max);
3365                error = -EINVAL;
3366                goto bad_swap_unlock_inode;
3367        }
3368
3369        maxpages = si->max;
3370
3371        /* OK, set up the swap map and apply the bad block list */
3372        swap_map = vzalloc(maxpages);
3373        if (!swap_map) {
3374                error = -ENOMEM;
3375                goto bad_swap_unlock_inode;
3376        }
3377
3378        error = swap_cgroup_swapon(si->type, maxpages);
3379        if (error)
3380                goto bad_swap_unlock_inode;
3381
3382        error = setup_swap_map(si, swap_header, swap_map, maxpages);
3383        if (error)
3384                goto bad_swap_unlock_inode;
3385
3386        /*
3387         * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3388         * be above MAX_PAGE_ORDER in case of a large swap file.
3389         */
3390        zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3391                                    GFP_KERNEL | __GFP_ZERO);
3392        if (!zeromap) {
3393                error = -ENOMEM;
3394                goto bad_swap_unlock_inode;
3395        }
3396
3397        if (si->bdev && bdev_stable_writes(si->bdev))
3398                si->flags |= SWP_STABLE_WRITES;
3399
3400        if (si->bdev && bdev_synchronous(si->bdev))
3401                si->flags |= SWP_SYNCHRONOUS_IO;
3402
3403        if (si->bdev && bdev_nonrot(si->bdev)) {
3404                si->flags |= SWP_SOLIDSTATE;
3405        } else {
3406                atomic_inc(&nr_rotate_swap);
3407                inced_nr_rotate_swap = true;
3408        }
3409
3410        cluster_info = setup_clusters(si, swap_header, maxpages);
3411        if (IS_ERR(cluster_info)) {
3412                error = PTR_ERR(cluster_info);
3413                cluster_info = NULL;
3414                goto bad_swap_unlock_inode;
3415        }
3416
3417        if ((swap_flags & SWAP_FLAG_DISCARD) &&
3418            si->bdev && bdev_max_discard_sectors(si->bdev)) {
3419                /*
3420                 * When discard is enabled for swap with no particular
3421                 * policy flagged, we set all swap discard flags here in
3422                 * order to sustain backward compatibility with older
3423                 * swapon(8) releases.
3424                 */
3425                si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3426                             SWP_PAGE_DISCARD);
3427
3428                /*
3429                 * By flagging sys_swapon, a sysadmin can tell us to
3430                 * either do single-time area discards only, or to just
3431                 * perform discards for released swap page-clusters.
3432                 * Now it's time to adjust si->flags accordingly.
3433                 */
3434                if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3435                        si->flags &= ~SWP_PAGE_DISCARD;
3436                else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3437                        si->flags &= ~SWP_AREA_DISCARD;
3438
3439                /* issue a swapon-time discard if it's still required */
3440                if (si->flags & SWP_AREA_DISCARD) {
3441                        int err = discard_swap(si);
3442                        if (unlikely(err))
3443                                pr_err("swapon: discard_swap(%p): %d\n",
3444                                        si, err);
3445                }
3446        }
3447
3448        error = init_swap_address_space(si->type, maxpages);
3449        if (error)
3450                goto bad_swap_unlock_inode;
3451
3452        error = zswap_swapon(si->type, maxpages);
3453        if (error)
3454                goto free_swap_address_space;
3455
3456        /*
3457         * Flush any pending IO and dirty mappings before we start using this
3458         * swap device.
3459         */
3460        inode->i_flags |= S_SWAPFILE;
3461        error = inode_drain_writes(inode);
3462        if (error) {
3463                inode->i_flags &= ~S_SWAPFILE;
3464                goto free_swap_zswap;
3465        }
3466
3467        mutex_lock(&swapon_mutex);
3468        prio = -1;
3469        if (swap_flags & SWAP_FLAG_PREFER)
3470                prio = swap_flags & SWAP_FLAG_PRIO_MASK;
3471        enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
3472
3473        pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3474                K(si->pages), name->name, si->prio, nr_extents,
3475                K((unsigned long long)span),
3476                (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3477                (si->flags & SWP_DISCARDABLE) ? "D" : "",
3478                (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3479                (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
3480
3481        mutex_unlock(&swapon_mutex);
3482        atomic_inc(&proc_poll_event);
3483        wake_up_interruptible(&proc_poll_wait);
3484
3485        error = 0;
3486        goto out;
3487free_swap_zswap:
3488        zswap_swapoff(si->type);
3489free_swap_address_space:
3490        exit_swap_address_space(si->type);
3491bad_swap_unlock_inode:
3492        inode_unlock(inode);
3493bad_swap:
3494        kfree(si->global_cluster);
3495        si->global_cluster = NULL;
3496        inode = NULL;
3497        destroy_swap_extents(si);
3498        swap_cgroup_swapoff(si->type);
3499        spin_lock(&swap_lock);
3500        si->swap_file = NULL;
3501        si->flags = 0;
3502        spin_unlock(&swap_lock);
3503        vfree(swap_map);
3504        kvfree(zeromap);
3505        kvfree(cluster_info);
3506        if (inced_nr_rotate_swap)
3507                atomic_dec(&nr_rotate_swap);
3508        if (swap_file)
3509                filp_close(swap_file, NULL);
3510out:
3511        if (!IS_ERR_OR_NULL(folio))
3512                folio_release_kmap(folio, swap_header);
3513        if (name)
3514                putname(name);
3515        if (inode)
3516                inode_unlock(inode);
3517        return error;
3518}
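
/*
 * Userspace view (illustrative sketch): the syscall above backs
 * swapon(2) from <sys/swap.h>.  Enabling a device at priority 5, for
 * example, looks like this ("/dev/sdb2" is just a placeholder):
 *
 *	#include <sys/swap.h>
 *
 *	if (swapon("/dev/sdb2",
 *		   SWAP_FLAG_PREFER | (5 & SWAP_FLAG_PRIO_MASK)) != 0)
 *		perror("swapon");
 *
 * The caller needs CAP_SYS_ADMIN, matching the capable() check above.
 */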
3519
3520void si_swapinfo(struct sysinfo *val)
3521{
3522        unsigned int type;
3523        unsigned long nr_to_be_unused = 0;
3524
3525        spin_lock(&swap_lock);
3526        for (type = 0; type < nr_swapfiles; type++) {
3527                struct swap_info_struct *si = swap_info[type];
3528
3529                if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3530                        nr_to_be_unused += swap_usage_in_pages(si);
3531        }
3532        val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3533        val->totalswap = total_swap_pages + nr_to_be_unused;
3534        spin_unlock(&swap_lock);
3535}
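
/*
 * Illustrative consumer: si_swapinfo() feeds sysinfo(2) as well as
 * /proc/meminfo, so the totals computed above surface in userspace
 * roughly as:
 *
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo info;
 *
 *	if (sysinfo(&info) == 0)
 *		printf("swap: %llu free of %llu bytes\n",
 *		       (unsigned long long)info.freeswap * info.mem_unit,
 *		       (unsigned long long)info.totalswap * info.mem_unit);
 */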
3536
3537/*
3538 * Verify that nr swap entries are valid and increment their swap map counts.
3539 *
3540 * Returns 0 on success, or an error code in the following cases:
3541 * - the swp_entry is invalid -> -EINVAL
3542 * - a swap-cache reference is requested but one already exists -> -EEXIST
3543 * - a swap-cache reference is requested but the entry is not in use -> -ENOENT
3544 * - a swap-mapped reference is requested but the count needs a swap
3545 *   count continuation to be added first -> -ENOMEM
3546 */
3547static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
3548{
3549        struct swap_info_struct *si;
3550        struct swap_cluster_info *ci;
3551        unsigned long offset;
3552        unsigned char count;
3553        unsigned char has_cache;
3554        int err, i;
3555
3556        si = swp_swap_info(entry);
3557        if (WARN_ON_ONCE(!si)) {
3558                pr_err("%s%08lx\n", Bad_file, entry.val);
3559                return -EINVAL;
3560        }
3561
3562        offset = swp_offset(entry);
3563        VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3564        VM_WARN_ON(usage == 1 && nr > 1);
3565        ci = lock_cluster(si, offset);
3566
3567        err = 0;
3568        for (i = 0; i < nr; i++) {
3569                count = si->swap_map[offset + i];
3570
3571                /*
3572                 * swapin_readahead() doesn't check if a swap entry is valid, so the
3573                 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3574                 */
3575                if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3576                        err = -ENOENT;
3577                        goto unlock_out;
3578                }
3579
3580                has_cache = count & SWAP_HAS_CACHE;
3581                count &= ~SWAP_HAS_CACHE;
3582
3583                if (!count && !has_cache) {
3584                        err = -ENOENT;
3585                } else if (usage == SWAP_HAS_CACHE) {
3586                        if (has_cache)
3587                                err = -EEXIST;
3588                } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3589                        err = -EINVAL;
3590                }
3591
3592                if (err)
3593                        goto unlock_out;
3594        }
3595
3596        for (i = 0; i < nr; i++) {
3597                count = si->swap_map[offset + i];
3598                has_cache = count & SWAP_HAS_CACHE;
3599                count &= ~SWAP_HAS_CACHE;
3600
3601                if (usage == SWAP_HAS_CACHE)
3602                        has_cache = SWAP_HAS_CACHE;
3603                else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3604                        count += usage;
3605                else if (swap_count_continued(si, offset + i, count))
3606                        count = COUNT_CONTINUED;
3607                else {
3608                        /*
3609                         * Don't need to rollback changes, because if
3610                         * usage == 1, there must be nr == 1.
3611                         */
3612                        err = -ENOMEM;
3613                        goto unlock_out;
3614                }
3615
3616                WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
3617        }
3618
3619unlock_out:
3620        unlock_cluster(ci);
3621        return err;
3622}
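
/*
 * For reference, each swap_map byte inspected above packs (constants
 * from include/linux/swap.h): the map count in the low bits, up to
 * SWAP_MAP_MAX (0x3e, with 0x3f reserved for SWAP_MAP_BAD);
 * SWAP_HAS_CACHE (0x40) when a swap cache page exists; and
 * COUNT_CONTINUED (0x80) when the count has overflowed into a
 * continuation page.
 */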
3623
3624/*
3625 * Help swapoff by noting that a swap entry belongs to shmem/tmpfs
3626 * (in which case its reference count is never incremented).
3627 */
3628void swap_shmem_alloc(swp_entry_t entry, int nr)
3629{
3630        __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
3631}
3632
3633/*
3634 * Increase reference count of swap entry by 1.
3635 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3636 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3637 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3638 * might occur if a page table entry has been corrupted.
3639 */
3640int swap_duplicate(swp_entry_t entry)
3641{
3642        int err = 0;
3643
3644        while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
3645                err = add_swap_count_continuation(entry, GFP_ATOMIC);
3646        return err;
3647}
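
/*
 * Typical caller pattern, a simplified sketch of what fork's page table
 * copy does: duplicate under the page table lock; if the atomic
 * continuation allocation fails, drop the lock and retry it with
 * GFP_KERNEL before restarting the copy:
 *
 *	if (swap_duplicate(entry) < 0) {
 *		(drop the page table lock)
 *		if (add_swap_count_continuation(entry, GFP_KERNEL))
 *			return -ENOMEM;
 *		(retake the lock and redo this pte)
 *	}
 */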
3648
3649/*
3650 * @entry: the first of nr contiguous swap entries to allocate swap cache for.
3651 *
3652 * Called when allocating swap cache for existing swap entries.
3653 * Returns 0 on success, or an error code otherwise:
3654 * -EEXIST means a swap cache already exists.
3655 * Note: the return convention differs from swap_duplicate().
3656 */
3657int swapcache_prepare(swp_entry_t entry, int nr)
3658{
3659        return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
3660}
3661
3662/*
3663 * The caller should ensure the entries belong to the same folio, so
3664 * that they cannot cross a cluster boundary.
3665 */
3666void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
3667{
3668        swap_entries_put_cache(si, entry, nr);
3669}
3670
3671struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3672{
3673        return swap_type_to_swap_info(swp_type(entry));
3674}
3675
3676/*
3677 * add_swap_count_continuation - called when a swap count is duplicated
3678 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3679 * page of the original vmalloc'ed swap_map, to hold the continuation count
3680 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3681 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3682 *
3683 * These continuation pages are seldom referenced: the common paths all work
3684 * on the original swap_map, only referring to a continuation page when the
3685 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3686 *
3687 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3688 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3689 * can be called after dropping locks.
3690 */
3691int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3692{
3693        struct swap_info_struct *si;
3694        struct swap_cluster_info *ci;
3695        struct page *head;
3696        struct page *page;
3697        struct page *list_page;
3698        pgoff_t offset;
3699        unsigned char count;
3700        int ret = 0;
3701
3702        /*
3703         * When debugging, it's easier to use __GFP_ZERO here; but it's better
3704         * for latency not to zero a page while GFP_ATOMIC and holding locks.
3705         */
3706        page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3707
3708        si = get_swap_device(entry);
3709        if (!si) {
3710                /*
3711                 * An acceptable race has occurred since the failing
3712                 * __swap_duplicate(): the swap device may have been swapped off.
3713                 */
3714                goto outer;
3715        }
3716
3717        offset = swp_offset(entry);
3718
3719        ci = lock_cluster(si, offset);
3720
3721        count = swap_count(si->swap_map[offset]);
3722
3723        if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3724                /*
3725                 * The higher the swap count, the more likely it is that tasks
3726                 * will race to add swap count continuation: we need to avoid
3727                 * over-provisioning.
3728                 */
3729                goto out;
3730        }
3731
3732        if (!page) {
3733                ret = -ENOMEM;
3734                goto out;
3735        }
3736
3737        head = vmalloc_to_page(si->swap_map + offset);
3738        offset &= ~PAGE_MASK;
3739
3740        spin_lock(&si->cont_lock);
3741        /*
3742         * Page allocation does not initialize the page's lru field,
3743         * but it does always reset its private field.
3744         */
3745        if (!page_private(head)) {
3746                BUG_ON(count & COUNT_CONTINUED);
3747                INIT_LIST_HEAD(&head->lru);
3748                set_page_private(head, SWP_CONTINUED);
3749                si->flags |= SWP_CONTINUED;
3750        }
3751
3752        list_for_each_entry(list_page, &head->lru, lru) {
3753                unsigned char *map;
3754
3755                /*
3756                 * If the previous map said no continuation, but we've found
3757                 * a continuation page, free our allocation and use this one.
3758                 */
3759                if (!(count & COUNT_CONTINUED))
3760                        goto out_unlock_cont;
3761
3762                map = kmap_local_page(list_page) + offset;
3763                count = *map;
3764                kunmap_local(map);
3765
3766                /*
3767                 * If this continuation count now has some space in it,
3768                 * free our allocation and use this one.
3769                 */
3770                if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3771                        goto out_unlock_cont;
3772        }
3773
3774        list_add_tail(&page->lru, &head->lru);
3775        page = NULL;                    /* now it's attached, don't free it */
3776out_unlock_cont:
3777        spin_unlock(&si->cont_lock);
3778out:
3779        unlock_cluster(ci);
3780        put_swap_device(si);
3781outer:
3782        if (page)
3783                __free_page(page);
3784        return ret;
3785}
3786
3787/*
3788 * swap_count_continued - when the original swap_map count is incremented
3789 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3790 * into, carry if so, or else fail until a new continuation page is allocated;
3791 * when the original swap_map count is decremented from 0 with continuation,
3792 * borrow from the continuation and report whether it still holds more.
3793 * Called while __swap_duplicate() or caller of swap_entry_put_locked()
3794 * holds cluster lock.
3795 */
3796static bool swap_count_continued(struct swap_info_struct *si,
3797                                 pgoff_t offset, unsigned char count)
3798{
3799        struct page *head;
3800        struct page *page;
3801        unsigned char *map;
3802        bool ret;
3803
3804        head = vmalloc_to_page(si->swap_map + offset);
3805        if (page_private(head) != SWP_CONTINUED) {
3806                BUG_ON(count & COUNT_CONTINUED);
3807                return false;           /* need to add count continuation */
3808        }
3809
3810        spin_lock(&si->cont_lock);
3811        offset &= ~PAGE_MASK;
3812        page = list_next_entry(head, lru);
3813        map = kmap_local_page(page) + offset;
3814
3815        if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
3816                goto init_map;          /* jump over SWAP_CONT_MAX checks */
3817
3818        if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3819                /*
3820                 * Think of how you add 1 to 999
3821                 */
3822                while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3823                        kunmap_local(map);
3824                        page = list_next_entry(page, lru);
3825                        BUG_ON(page == head);
3826                        map = kmap_local_page(page) + offset;
3827                }
3828                if (*map == SWAP_CONT_MAX) {
3829                        kunmap_local(map);
3830                        page = list_next_entry(page, lru);
3831                        if (page == head) {
3832                                ret = false;    /* add count continuation */
3833                                goto out;
3834                        }
3835                        map = kmap_local_page(page) + offset;
3836init_map:               *map = 0;               /* we didn't zero the page */
3837                }
3838                *map += 1;
3839                kunmap_local(map);
3840                while ((page = list_prev_entry(page, lru)) != head) {
3841                        map = kmap_local_page(page) + offset;
3842                        *map = COUNT_CONTINUED;
3843                        kunmap_local(map);
3844                }
3845                ret = true;                     /* incremented */
3846
3847        } else {                                /* decrementing */
3848                /*
3849                 * Think of how you subtract 1 from 1000
3850                 */
3851                BUG_ON(count != COUNT_CONTINUED);
3852                while (*map == COUNT_CONTINUED) {
3853                        kunmap_local(map);
3854                        page = list_next_entry(page, lru);
3855                        BUG_ON(page == head);
3856                        map = kmap_local_page(page) + offset;
3857                }
3858                BUG_ON(*map == 0);
3859                *map -= 1;
3860                if (*map == 0)
3861                        count = 0;
3862                kunmap_local(map);
3863                while ((page = list_prev_entry(page, lru)) != head) {
3864                        map = kmap_local_page(page) + offset;
3865                        *map = SWAP_CONT_MAX | count;
3866                        count = COUNT_CONTINUED;
3867                        kunmap_local(map);
3868                }
3869                ret = count == COUNT_CONTINUED;
3870        }
3871out:
3872        spin_unlock(&si->cont_lock);
3873        return ret;
3874}
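
/*
 * Worked example of the carry above: once an entry's in-place count
 * reaches SWAP_MAP_MAX (0x3e), the next duplication stores
 * COUNT_CONTINUED (0x80) in the primary swap_map byte and a 1 in the
 * matching byte of the first continuation page (the init_map path);
 * later increments count up in the primary byte again until the next
 * carry, exactly like adding 1 to 999 ripples into a new digit.
 */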
3875
3876/*
3877 * free_swap_count_continuations - called at swapoff to free all continuation
3878 * pages appended to the swap_map, after it is quiesced, before vfree'ing it.
3879 */
3880static void free_swap_count_continuations(struct swap_info_struct *si)
3881{
3882        pgoff_t offset;
3883
3884        for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3885                struct page *head;
3886                head = vmalloc_to_page(si->swap_map + offset);
3887                if (page_private(head)) {
3888                        struct page *page, *next;
3889
3890                        list_for_each_entry_safe(page, next, &head->lru, lru) {
3891                                list_del(&page->lru);
3892                                __free_page(page);
3893                        }
3894                }
3895        }
3896}
3897
3898#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3899static bool __has_usable_swap(void)
3900{
3901        return !plist_head_empty(&swap_active_head);
3902}
3903
3904void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
3905{
3906        struct swap_info_struct *si, *next;
3907        int nid = folio_nid(folio);
3908
3909        if (!(gfp & __GFP_IO))
3910                return;
3911
3912        if (!__has_usable_swap())
3913                return;
3914
3915        if (!blk_cgroup_congested())
3916                return;
3917
3918        /*
3919         * We've already scheduled a throttle, avoid taking the global swap
3920         * lock.
3921         */
3922        if (current->throttle_disk)
3923                return;
3924
3925        spin_lock(&swap_avail_lock);
3926        plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3927                                  avail_lists[nid]) {
3928                if (si->bdev) {
3929                        blkcg_schedule_throttle(si->bdev->bd_disk, true);
3930                        break;
3931                }
3932        }
3933        spin_unlock(&swap_avail_lock);
3934}
3935#endif
3936
3937static int __init swapfile_init(void)
3938{
3939        int nid;
3940
3941        swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3942                                         GFP_KERNEL);
3943        if (!swap_avail_heads) {
3944                pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3945                return -ENOMEM;
3946        }
3947
3948        for_each_node(nid)
3949                plist_head_init(&swap_avail_heads[nid]);
3950
3951        swapfile_maximum_size = arch_max_swapfile_size();
3952
3953#ifdef CONFIG_MIGRATION
3954        if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
3955                swap_migration_ad_supported = true;
3956#endif  /* CONFIG_MIGRATION */
3957
3958        return 0;
3959}
3960subsys_initcall(swapfile_init);
3961