linux/include/linux/swapops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT  (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)

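/*
 * Worked example (illustrative, assuming a 64-bit kernel, where
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5): SWP_TYPE_SHIFT
 * is then 58, so `type' lands in the high bits and `offset' in bits 0-57:
 *
 *      62        58 57                                           0
 *      +-----------+---------------------------------------------+
 *      |   type    |                   offset                    |
 *      +-----------+---------------------------------------------+
 *
 *      swp_entry(1, 2).val == (1UL << 58) | 2
 */
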
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK;
}

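/*
 * Round-trip sketch (illustrative values): the helpers above pack and
 * unpack losslessly for any offset that fits in SWP_OFFSET_MASK:
 *
 *      swp_entry_t entry = swp_entry(2, 0x1000);
 *
 *      swp_type(entry) is 2, swp_offset(entry) is 0x1000.
 */
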
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte);
}

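/*
 * Usage sketch (illustrative, locking elided): a page-table walker
 * filters out present and empty ptes before decoding the rest:
 *
 *      pte_t pte = *ptep;
 *
 *      if (is_swap_pte(pte)) {
 *              swp_entry_t entry = pte_to_swp_entry(pte);
 *              (entry is a real swap slot or a special entry, see below)
 *      }
 */
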
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pte(arch_entry);
}

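/*
 * Note (sketch): pte_to_swp_entry() strips the soft-dirty bit, so a
 * pte -> swp_entry_t -> pte round trip loses it.  Callers that need to
 * preserve it re-apply the bit themselves:
 *
 *      swp_entry_t entry = pte_to_swp_entry(oldpte);
 *      pte_t newpte = swp_entry_to_pte(entry);
 *
 *      if (pte_swp_soft_dirty(oldpte))
 *              newpte = pte_swp_mksoft_dirty(newpte);
 */
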
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
        swp_entry_t entry;

        entry.val = xa_to_value(arg);
        return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
        return xa_mk_value(entry.val);
}

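/*
 * Usage sketch (illustrative): shmem stores swapped-out page cache
 * slots as xarray values rather than struct page pointers, and tells
 * the two apart with xa_is_value() on lookup:
 *
 *      void *item = swp_to_radix_entry(swap);  (stored in the mapping)
 *      ...
 *      if (xa_is_value(item))
 *              swap = radix_to_swp_entry(item);
 */
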
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
                         page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        int type = swp_type(entry);
        return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return pfn_to_page(swp_offset(entry));
}
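
/*
 * Sketch (assumes a ZONE_DEVICE private page, locking and error
 * handling elided): a driver-assisted migration replaces a normal pte
 * with one of these entries:
 *
 *      swp_entry_t entry = make_device_private_entry(page, writable);
 *      pte_t swp_pte = swp_entry_to_pte(entry);
 *
 *      set_pte_at(mm, addr, ptep, swp_pte);
 */
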
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(compound_head(page)));

        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));
        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(compound_head(p)));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte);
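
/*
 * Fault-path sketch (illustrative, after pte_to_swp_entry() on a
 * non-present pte): a thread faulting on a page that is being migrated
 * just waits for the migration entry to be removed and retries:
 *
 *      if (is_migration_entry(entry)) {
 *              migration_entry_wait(mm, pmd, address);
 *              (retry the fault)
 *      }
 */
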
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        swp_entry_t arch_entry;

        if (pmd_swp_soft_dirty(pmd))
                pmd = pmd_swp_clear_soft_dirty(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
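
/*
 * Sketch (locking elided): huge-pmd walkers check for a migration
 * entry before treating the pmd as a mapped huge page:
 *
 *      pmd_t pmdval = READ_ONCE(*pmd);
 *
 *      if (is_pmd_migration_entry(pmdval))
 *              pmd_migration_entry_wait(mm, pmd);
 */
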
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page)
{
        BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new)
{
        BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_HWPOISON;
}

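/*
 * Sketch: the fault path treats a hwpoison entry as a hard error
 * instead of swapping anything back in:
 *
 *      if (is_hwpoison_entry(entry))
 *              (fail the fault with VM_FAULT_HWPOISON)
 */
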
static inline void num_poisoned_pages_inc(void)
{
        atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
        return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
        return swp_type(entry) >= MAX_SWAPFILES;
}
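
/*
 * Dispatch sketch (mirrors the shape of the swap fault path, details
 * elided): non_swap_entry() separates real swap slots from the special
 * entry types defined above:
 *
 *      if (!non_swap_entry(entry))
 *              (normal swap-in)
 *      else if (is_migration_entry(entry))
 *              migration_entry_wait(mm, pmd, address);
 *      else if (is_device_private_entry(entry))
 *              (hand the fault to the owning device driver)
 *      else if (is_hwpoison_entry(entry))
 *              (fail the fault with VM_FAULT_HWPOISON)
 */
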
#else
static inline int non_swap_entry(swp_entry_t entry)
{
        return 0;
}
#endif

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */