linux/include/linux/swapops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT  (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/* Clear all flags, keeping only the swp_entry_t-related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        if (pte_swp_uffd_wp(pte))
                pte = pte_swp_clear_uffd_wp(pte);
        return pte;
}
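
/*
 * Illustrative sketch (not part of the kernel API): the soft-dirty and
 * uffd-wp software bits live alongside the type/offset encoding in a swap
 * PTE, so they must be stripped before decoding.  On an arch with the
 * soft-dirty feature enabled, something like this would otherwise yield a
 * corrupted type/offset decode:
 *
 *      pte = pte_swp_mksoft_dirty(pte);        // extra software bit set
 *      pte = pte_swp_clear_flags(pte);         // back to pure type+offset
 *      arch_entry = __pte_to_swp_entry(pte);   // now safe to decode
 */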

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK;
}
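
/*
 * Example (illustrative only): encoding and decoding are exact inverses
 * for any type below MAX_SWAPFILES and any offset that fits within
 * SWP_OFFSET_MASK:
 *
 *      swp_entry_t entry = swp_entry(2, 0x1234);
 *      swp_type(entry);        // == 2
 *      swp_offset(entry);      // == 0x1234
 */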

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        pte = pte_swp_clear_flags(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pte(arch_entry);
}
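
/*
 * Illustrative sketch: the usual pattern in a page-table walker.  A pte
 * that is neither none nor present holds a swap entry; pte_to_swp_entry()
 * strips the soft-dirty/uffd-wp software bits first, so they do not leak
 * into the type/offset decode:
 *
 *      if (is_swap_pte(pte)) {
 *              swp_entry_t entry = pte_to_swp_entry(pte);
 *              pte_t again = swp_entry_to_pte(entry);
 *              // `again' equals `pte' minus any software flag bits
 *      }
 */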

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
        swp_entry_t entry;

        entry.val = xa_to_value(arg);
        return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
        return xa_mk_value(entry.val);
}
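
/*
 * Example (illustrative only): shmem keeps swapped-out pages in its
 * xarray as value entries rather than page pointers.  The conversion is a
 * plain xa_mk_value()/xa_to_value() round trip:
 *
 *      void *slot = swp_to_radix_entry(entry); // xa_is_value(slot) is true
 *      swp_entry_t back = radix_to_swp_entry(slot);
 *      // back.val == entry.val
 */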

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
                         page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        int type = swp_type(entry);
        return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return pfn_to_page(swp_offset(entry));
}
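
/*
 * Illustrative sketch (assuming `page' is a ZONE_DEVICE private page): a
 * device private entry records the page's PFN in the offset field and the
 * read/write permission in the type field:
 *
 *      swp_entry_t entry = make_device_private_entry(page, true);
 *      is_device_private_entry(entry);         // true
 *      is_write_device_private_entry(entry);   // true
 *      device_private_entry_to_page(entry);    // == page
 */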
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(compound_head(page)));

        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));
        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(compound_head(p)));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
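
/*
 * Illustrative sketch (assuming `page' is locked, as the BUG_ON above
 * requires): migration replaces each mapping of the page with a migration
 * entry, which can be downgraded to read when the mapping had no write
 * permission:
 *
 *      swp_entry_t entry = make_migration_entry(page, 1);
 *      is_write_migration_entry(entry);        // true
 *      make_migration_entry_read(&entry);
 *      is_write_migration_entry(entry);        // now false
 *      migration_entry_to_page(entry);         // still == page
 */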

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        swp_entry_t arch_entry;

        if (pmd_swp_soft_dirty(pmd))
                pmd = pmd_swp_clear_soft_dirty(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
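
/*
 * Example (illustrative only): a THP under migration leaves behind a
 * non-present PMD whose swap bits decode to a migration entry:
 *
 *      pmd_t pmd = *pmdp;
 *      if (is_pmd_migration_entry(pmd)) {
 *              swp_entry_t entry = pmd_to_swp_entry(pmd);
 *              struct page *page = migration_entry_to_page(entry);
 *              ...
 *      }
 */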
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page)
{
        BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new)
{
        BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
        atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}
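
/*
 * Illustrative sketch (assuming `page' is locked): when memory_failure()
 * unmaps a poisoned page, its PTE is replaced with an hwpoison entry so
 * that later faults report the error instead of touching the bad memory:
 *
 *      swp_entry_t entry = make_hwpoison_entry(page);
 *      is_hwpoison_entry(entry);       // true
 *      num_poisoned_pages_inc();
 */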

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
        return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
    defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
        return swp_type(entry) >= MAX_SWAPFILES;
}
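
/*
 * Example (illustrative only, assuming `page' is locked): true swap
 * entries use types below MAX_SWAPFILES, while migration, device-private
 * and hwpoison entries are allocated at or above it, so they can be
 * filtered with a single comparison:
 *
 *      swp_entry_t a = swp_entry(1, 0x10);             // real swap slot
 *      swp_entry_t b = make_migration_entry(page, 0);
 *      non_swap_entry(a);      // 0
 *      non_swap_entry(b);      // 1
 */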
#else
static inline int non_swap_entry(swp_entry_t entry)
{
        return 0;
}
#endif

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */