linux/include/linux/swap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the swapcache page index is limited to 27 bits
 * on 32-bit-pgoff_t architectures, and that assumes the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5
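/*
 * Worked example (a sketch, assuming the generic 5/27 split and 4 KiB
 * pages, not a statement about any particular architecture): an offset
 * of up to 2^27 pages addresses at most 512 GiB per swap area, and
 * swp_entry_t values built with swp_entry(type, offset) decode back via
 * swp_type() and swp_offset() (see linux/swapops.h).
 */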

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
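/*
 * Worked example (assuming all three options above are enabled): 32 type
 * values exist in total, so MAX_SWAPFILES evaluates to 32 - 2 - 2 - 1 = 27;
 * types 0..26 name real swap areas, 27 is SWP_HWPOISON, 28/29 are
 * SWP_MIGRATION_READ/WRITE, and 30/31 are SWP_DEVICE_WRITE/READ.
 */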

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct list_head list;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
        ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
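/*
 * Worked example (assuming 4 KiB pages and 4-byte int): info.badpages
 * starts at byte 1024 + 3*4 + 16 + 16 + 117*4 = 1536 and magic.magic
 * starts at byte 4096 - 10 = 4086, so MAX_SWAP_BADPAGES works out to
 * (4086 - 1536) / 4 = 637.
 */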

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_FILE        = (1 << 7),     /* set after swap_activate success */
        SWP_AREA_DISCARD = (1 << 8),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 9),    /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 10),  /* never overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 12),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX    0x3e    /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD    0x3f    /* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX   0x7f    /* Max count, in each swap_map continuation */
#define COUNT_CONTINUED 0x80    /* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
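/*
 * Illustrative example (derived from the flag values above, not a quote
 * from the allocator): a first-level swap_map byte of 0x42 means
 * SWAP_HAS_CACHE plus a usage count of 2, i.e. the slot typically has two
 * pte references and a page in the swap cache; 0x3f marks a bad slot and
 * 0xbf a slot owned by shmem/tmpfs.
 */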

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space that is SWAPFILE_CLUSTER pages long and naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and swap_info_struct->swap_map
                                 * elements that correspond to the
                                 * swap cluster.
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
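/*
 * Illustrative example (restating the rules above): a free cluster has
 * CLUSTER_FLAG_FREE set and data holding the index of the next free
 * cluster (or CLUSTER_FLAG_NEXT_NULL when it is the last one); a cluster
 * in use has no free flag and data counting how many of its pages are
 * currently allocated.
 */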

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct swap_extent *curr_swap_extent;
        struct swap_extent first_swap_extent;
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags needs
                                         * holding both this lock and
                                         * swap_lock. If both locks are
                                         * needed, take swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);

/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);

/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping)                               \
({                                                                      \
        radix_tree_update_node_t __helper = workingset_update_node;     \
        if (dax_mapping(mapping) || shmem_mapping(mapping))             \
                __helper = NULL;                                        \
        __helper;                                                       \
})
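/*
 * Usage sketch (modelled on the page cache delete path; not a new API,
 * just how the macro above is meant to be passed along):
 *
 *      __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
 *                           workingset_lookup_update(mapping));
 *
 * The macro supplies workingset_update_node() as the update_node
 * callback, or NULL for DAX and shmem mappings, which do not keep
 * shadow entries.
 */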

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                         struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
                                unsigned int order)
{
        return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
        bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
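/*
 * Worked example (assuming 4 KiB pages): 1 << 14 slots of 4 KiB cover
 * 64 MiB of swap, which is where the "one address space per 64M" rule
 * above comes from; an entry at offset 0x5000 therefore lands in
 * swapper_spaces[swp_type(entry)][0x5000 >> 14], i.e. the second
 * address space of that swap type.
 */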
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr);

extern struct page *swap_readahead_detect(struct vm_fault *vmf,
                                          struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                           struct vm_fault *vmf,
                                           struct vma_swap_readahead *swap_ra);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
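/*
 * Worked example (made-up numbers): with total_swap_pages == 1000 and
 * only 400 free entries left, 400 * 2 < 1000 holds, so vm_swap_full()
 * reports true once more than half of swap is in use.
 */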

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

#define swap_address_space(entry)               (NULL)
#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file, so leave
 * put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
        return false;
}

static inline struct page *swap_readahead_detect(
        struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
        return NULL;
}

static inline struct page *do_swap_page_readahead(
        swp_entry_t fentry, gfp_t gfp_mask,
        struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                        gfp_t gfp_mask)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root ? */
        if (mem_cgroup_disabled() || !memcg->css.parent)
                return vm_swappiness;

        return memcg->swappiness;
}

#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */