linux/include/linux/swap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
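
/*
 * Example (illustrative, not part of this header): userspace passes these
 * flags to swapon(2). Requesting priority 5 plus per-page discard could
 * look like this (the device path and values are made up):
 *
 *      swapon("/dev/sdb2", SWAP_FLAG_PREFER |
 *             ((5 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
 *             SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES);
 */
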
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5
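
/*
 * Worked example (illustrative, assuming 4 KiB pages): with the 5/27
 * split above, one swap type addresses 2^27 pages, i.e. 2^27 * 2^12
 * bytes = 512 GiB of swap space, and there are 2^5 = 32 type values
 * before any are reserved for the special entries below.
 */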

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                             SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
                             SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memory.  As the name "PTE" hints, they should only be applied
 * to the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                            SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU
 * page table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) an anonymous page that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
        SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
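
/*
 * Worked example (illustrative): with all of the above config options
 * enabled, MAX_SWAPFILES = 32 - 4 (device) - 3 (migration) - 1 (hwpoison)
 * - 1 (pte marker) - 1 (swapin error) = 22 usable swap types; the
 * remaining ten type values encode the special entries defined above.
 */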

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
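
/*
 * Illustrative sketch (not kernel code): a tool probing for the new
 * swap signature would check the last ten bytes of the first page:
 *
 *      union swap_header *hdr = page_address(page);
 *      if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
 *              ...             // hdr->info.version, last_page etc. are valid
 */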

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))

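/*
 * Worked example (illustrative, assuming 4 KiB pages):
 * offsetof(magic.magic) = 4096 - 10 = 4086 and
 * offsetof(info.badpages) = 1024 + 3*4 + 16 + 16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */
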
enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS_OPS      = (1 << 8),     /* swapfile operations go through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* don't overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80    /* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX    0x3e    /* Max count */
#define SWAP_MAP_BAD    0x3f    /* Note page is bad */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX   0x7f    /* Max count */

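/*
 * Illustrative decoding of one swap_map byte (a sketch, not an API;
 * "si" stands for some struct swap_info_struct):
 *
 *      unsigned char ent = si->swap_map[offset];
 *      bool cached = ent & SWAP_HAS_CACHE;     // also in the swap cache?
 *      switch (ent & ~SWAP_HAS_CACHE) {
 *      case SWAP_MAP_BAD:      // unusable slot
 *      case SWAP_MAP_SHMEM:    // entry owned by shmem/tmpfs
 *              break;
 *      default:                // low bits hold the swap count; if
 *                              // COUNT_CONTINUED is set, the count goes on
 *                              // in continuation pages (SWAP_CONT_MAX each)
 *      }
 */
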
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list
 * to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and the swap_info_struct->swap_map
                                 * elements that correspond to this
                                 * swap cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

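/*
 * Illustrative use of the data/flags fields (a sketch): for a free
 * cluster, data links to the next free cluster; once allocated, data
 * counts the pages in use:
 *
 *      if (info->flags & CLUSTER_FLAG_FREE)
 *              next_free = info->data;         // link in the free list
 *      else
 *              usage = info->data;             // in-use page count
 */
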
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        struct percpu_ref users;        /* indicate and keep swap device valid. */
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
        struct completion comp;         /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags needs
                                         * to hold this lock and swap_lock. If
                                         * both locks must be held, take
                                         * swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last as the length of the
                                           * array is nr_node_ids, which is not
                                           * a fixed value, so it has to be
                                           * allocated dynamically.
                                           * And it has to be an array so that
                                           * plist_for_each_* can work.
                                           */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
        swp_entry_t entry = { .val = page_private(&folio->page) };
        return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {                           \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {         \
                xas_set_update(xas, workingset_update_node);            \
                xas_set_lru(xas, &shadow_nodes);                        \
        }                                                               \
} while (0)

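/*
 * Typical usage (a sketch, mirroring callers in mm/): install the update
 * hooks before modifying a mapping's xarray:
 *
 *      XA_STATE(xas, &mapping->i_pages, index);
 *      mapping_set_update(&xas, mapping);
 *      ...             // xas_store() etc. now maintain shadow-node tracking
 */
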
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
                          unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
        return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
        atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
        /* Is any node_reclaim_mode bit set? */
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
        return global_node_page_state(NR_SWAPCACHE);
}

extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
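
/*
 * Worked example (illustrative): with 1,000,000 swap pages in total,
 * vm_swap_full() becomes true once fewer than 500,000 remain free
 * (nr_swap_pages counts the pages still free), i.e. once the swap
 * area is at least half used.
 */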

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
        percpu_ref_put(&si->users);
}
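
/*
 * Typical usage (a sketch): pin the swap device while dereferencing an
 * entry so it cannot disappear under a concurrent swapoff:
 *
 *      struct swap_info_struct *si = get_swap_device(entry);
 *      if (si) {
 *              ...             // safe to use the entry's device here
 *              put_swap_device(si);
 *      }
 */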

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
        return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr))

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
                                  unsigned long start_page,
                                  unsigned long nr_pages, sector_t start_block)
{
        return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
        if (mem_cgroup_disabled())
                return;
        __cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
        cgroup_throttle_swaprate(&folio->page, gfp);
}

#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                swp_entry_t entry)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */