linux/include/linux/swap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type leaves 27 bits for the swapcache page offset on
 * 32-bit-pgoff_t architectures, assuming the architecture packs the
 * type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5
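
/*
 * Worked example (illustrative, not a kernel API): with a 5/27 split, each
 * swap area can address up to 2^27 pages, i.e. 2^27 * 4KB = 512GB with 4KB
 * pages.  A hypothetical decode of such a packed 32-bit value:
 *
 *	type   = packed >> 27;                  (upper 5 bits)
 *	offset = packed & ((1UL << 27) - 1);    (lower 27 bits)
 *
 * Real architectures define their own pte encodings; see the per-arch
 * __swp_type()/__swp_offset() helpers.
 */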

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support (migration entries are installed in page table
 * entries while a page is being moved, e.g. between NUMA nodes).
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
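
/*
 * Worked example (illustrative): with all three options enabled,
 * MAX_SWAPFILES = 32 - 2 - 2 - 1 = 27, and the reserved type numbers are:
 *
 *	27  SWP_HWPOISON
 *	28  SWP_MIGRATION_READ
 *	29  SWP_MIGRATION_WRITE
 *	30  SWP_DEVICE_WRITE
 *	31  SWP_DEVICE_READ
 *
 * Types 0..26 remain available for real swap areas.
 */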

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the page makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
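
/*
 * Sketch (illustrative, not part of this header): validating the signature,
 * which occupies the last 10 bytes of the first page of the swap area.
 *
 *	union swap_header *hdr = page_address(page);
 *	if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10) &&
 *	    hdr->info.version == 1)
 *		... modern swap area, v1 header layout in `info' ...
 *
 * swapon(2) performs a check along these lines; the exact logic lives in
 * mm/swapfile.c.
 */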

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};
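
/*
 * Usage sketch (illustrative): a reclaimer publishes a reclaim_state so
 * that the slab free paths can credit freed slab pages back to it:
 *
 *	struct reclaim_state rs = { .reclaimed_slab = 0 };
 *
 *	current->reclaim_state = &rs;
 *	... shrink slab caches; free paths bump rs.reclaimed_slab ...
 *	current->reclaim_state = NULL;
 *	total_freed += rs.reclaimed_slab;
 */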

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};
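
/*
 * Sketch of the mapping an extent encodes (illustrative): a swap page
 * offset in [start_page, start_page + nr_pages) lands on disk at
 *
 *	block = se->start_block + (offset - se->start_page);
 *
 * The extents form an rbtree keyed on start_page, so translating an offset
 * is a logarithmic lookup; see map_swap_page() below.
 */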

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
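
/*
 * Worked example (4KB pages): magic.magic starts at PAGE_SIZE - 10 = 4086,
 * and info.badpages starts at 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so
 * MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */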

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS          = (1 << 8),     /* swap file goes through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* must not overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
        SWP_VALID       = (1 << 13),    /* swap is valid to be operated on? */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX    0x3e    /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD    0x3f    /* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX   0x7f    /* Max count, in each swap_map continuation */
#define COUNT_CONTINUED 0x80    /* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
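
/*
 * Byte-layout sketch for a first-level swap_map entry (illustrative):
 *
 *	bits 0-5  reference count, saturating at SWAP_MAP_MAX (0x3e)
 *	bit 6     SWAP_HAS_CACHE: the slot also has a swapcache page
 *	bit 7     COUNT_CONTINUED: the count overflows into a continuation
 *	          page (see add_swap_count_continuation() below)
 *
 * e.g. 0x41 means one reference plus a swapcache page, while 0xbf
 * (SWAP_MAP_SHMEM) is a special marker for shmem/tmpfs-owned slots.
 */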

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and the swap_info_struct->swap_map
                                 * elements corresponding to this swap
                                 * cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
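
/*
 * Sketch (illustrative): interpreting a cluster under ci->lock.
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next = ci->data;	(index of the next free cluster,
 *					 unless CLUSTER_FLAG_NEXT_NULL)
 *	else
 *		count = ci->data;	(pages allocated in this cluster)
 */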

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr and the free/discard
                                         * cluster lists. Other fields are only
                                         * changed at swapon/swapoff, so are
                                         * protected by swap_lock. Changing
                                         * flags requires holding both this
                                         * lock and swap_lock; when both are
                                         * needed, take swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[0]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last: the array length is
                                           * nr_node_ids, which is not a
                                           * compile-time constant, so it has
                                           * to be allocated dynamically.
                                           * And it has to be an array so that
                                           * plist_for_each_* can work.
                                           */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif
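
/*
 * Worked numbers (illustrative): the readahead window is capped at
 * 2^SWAP_RA_ORDER_CEILING pages, i.e. 32 on 64-bit and 8 on 32-bit, so
 * the on-stack PTE cache below holds at most 8 entries on 32-bit kernels.
 */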

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct page *page);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {                           \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
                xas_set_update(xas, workingset_update_node);            \
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                         struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_active_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
        bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for every 64MB of swap space */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
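
/*
 * Worked example (illustrative): 2^14 pages * 4KB = 64MB per address space,
 * so a 1GB swap area is sharded over 16 struct address_space instances.
 * An entry of type 1 at offset 20480 maps to
 * &swapper_spaces[1][20480 >> 14], i.e. the second space of that area.
 * Sharding spreads swapcache tree lock contention across the spaces.
 */
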
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap more than 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
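
/*
 * Note (illustrative): nr_swap_pages counts *free* swap pages, so the test
 * reads "free swap has dropped below half of total", e.g. 3GB free of 8GB
 * total makes vm_swap_full() true.
 */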

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
        rcu_read_unlock();
}
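
/*
 * Pairing note: get_swap_device() validates the entry and returns its
 * swap_info_struct with the RCU read lock held, pinning the device against
 * a concurrent swapoff; put_swap_device() drops that lock once the caller
 * is done with the entry.
 */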

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

#define swap_address_space(entry)               (NULL)
#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file, so leave
 * put_page() and release_pages() undeclared...
 */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr))

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
                                gfp_t gfp_mask, struct vm_fault *vmf)
{
        return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                        gfp_t gfp_mask)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
                                                        swp_entry_t entry)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root cgroup? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
                                         gfp_t gfp_mask);
#else
static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
                                                int node, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */