linux/include/linux/swap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
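
/*
 * Illustrative sketch (not part of this header): a swapon(2) caller that
 * wants an explicit priority `prio' would build its flags word roughly as
 *
 *        swap_flags = SWAP_FLAG_PREFER |
 *                     ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
 *
 * SWAP_FLAGS_VALID is the mask of bits swapon accepts.
 */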
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures.  This assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
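
/*
 * Worked example, assuming CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE are all enabled: the 5-bit type field encodes
 * 1 << 5 == 32 values, of which the top five are claimed by SWP_HWPOISON
 * (1), migration entries (2) and device-private entries (2), leaving
 * MAX_SWAPFILES == 32 - 2 - 2 - 1 == 27 ordinary swap types.
 */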

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
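
/*
 * Worked example, assuming PAGE_SIZE == 4096: info.badpages begins at
 * byte offset 1024 + 3 * 4 + 16 + 16 + 117 * 4 == 1536, magic.magic
 * begins at 4096 - 10 == 4086, so MAX_SWAP_BADPAGES works out to
 * (4086 - 1536) / 4 == 637 entries.
 */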

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS_OPS      = (1 << 8),     /* swapfile operations go through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* don't overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
        SWP_VALID       = (1 << 13),    /* swap is valid to be operated on? */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80    /* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX    0x3e    /* Max count */
#define SWAP_MAP_BAD    0x3f    /* Note page is bad */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX   0x7f    /* Max count */
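
/*
 * Illustrative decoding of a first swap_map byte (a sketch, not an
 * exhaustive list): 0x01 is a single reference; 0x41 is one reference
 * plus SWAP_HAS_CACHE (the slot is in the swap cache); 0x3f is
 * SWAP_MAP_BAD; a count beyond SWAP_MAP_MAX sets COUNT_CONTINUED and
 * spills into continuation bytes, each capped at SWAP_CONT_MAX.
 */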

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space that is SWAPFILE_CLUSTER pages long and naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and swap_info_struct->swap_map
                                 * elements corresponding to this
                                 * swap cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags requires
                                         * holding both this lock and swap_lock;
                                         * when both must be held, take
                                         * swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last, as the length of the
                                           * array is nr_node_ids, which is not
                                           * a fixed value, so it has to be
                                           * allocated dynamically.
                                           * And it has to be an array so that
                                           * plist_for_each_* can work.
                                           */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {                           \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
                xas_set_update(xas, workingset_update_node);            \
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
                          unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                         struct lruvec *lruvec, struct list_head *head);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
        bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
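
/*
 * Worked example, assuming 4KiB pages: SWAP_ADDRESS_SPACE_PAGES ==
 * 1 << 14 == 16384 pages == 64MiB of swap per address space, so an
 * entry with swp_offset(entry) == 20000 maps to the second address
 * space of its type (20000 >> 14 == 1).
 */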
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp);
extern void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
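
/*
 * Worked example: with total_swap_pages == 1000, vm_swap_full() becomes
 * true once fewer than 500 slots remain free, since "free * 2 < total"
 * is the same test as "more than half of swap is in use".
 */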

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
        rcu_read_unlock();
}
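
/*
 * Typical pairing (an illustrative sketch, not a quote from mm/): a
 * successful get_swap_device() returns with the RCU read lock held, so
 * every success must be matched by put_swap_device():
 *
 *        si = get_swap_device(entry);
 *        if (!si)
 *                return;        /* the device went away under us */
 *        ... operate on si and entry ...
 *        put_swap_device(si);
 */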

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

#define swap_address_space(entry)               (NULL)
#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr))

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
                                gfp_t gfp_mask, struct vm_fault *vmf)
{
        return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        return find_get_page(mapping, index);
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                        gfp_t gfp_mask, void **shadowp)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
                                        swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */