linux/include/linux/swapops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space XArray (historically a
 * radix tree, hence the *_radix_entry() names below).  We want to get good
 * packing density in that tree, so the index should be dense in the
 * low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits
 * (MAX_SWAPFILES_SHIFT), we allow for shmem/tmpfs to shift it all up a
 * further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT  (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)

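/*
 * Illustrative layout, not part of the API: on a 64-bit build an XArray
 * value entry holds BITS_PER_XA_VALUE == 63 bits and MAX_SWAPFILES_SHIFT
 * is 5, so SWP_TYPE_SHIFT is 58 and a swp_entry_t packs as
 *
 *      bits 62..58: swap type   (entry.val >> SWP_TYPE_SHIFT)
 *      bits 57..0:  swap offset (entry.val & SWP_OFFSET_MASK)
 */
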
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK;
}

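/*
 * A minimal round-trip sketch (illustrative only, values made up):
 *
 *      swp_entry_t e = swp_entry(1, 0x1234);
 *
 * swp_type(e) returns 1 and swp_offset(e) returns 0x1234, provided the
 * offset fits in SWP_OFFSET_MASK; higher offset bits are silently
 * truncated by the mask in swp_entry().
 */
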
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte);
}

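/*
 * Descriptive summary of the three pte states distinguished above (no new
 * semantics):
 *
 *      pte_none()      - empty, nothing mapped
 *      pte_present()   - a real page is mapped
 *      neither         - a swap-style entry: real swap, migration,
 *                        device-private or hwpoison (see below)
 */
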
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        if (pte_swp_uffd_wp(pte))
                pte = pte_swp_clear_uffd_wp(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pte(arch_entry);
}

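/*
 * Note (editorial): soft-dirty and uffd-wp are software pte bits layered
 * on top of the swap entry, so pte_to_swp_entry() strips them before
 * decoding.  swp_entry_to_pte() does not restore them; callers that care
 * re-apply the bits themselves, e.g. (sketch, simplified):
 *
 *      pte_t pte = swp_entry_to_pte(entry);
 *
 *      if (pte_swp_soft_dirty(old_pte))
 *              pte = pte_swp_mksoft_dirty(pte);
 */
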
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
        swp_entry_t entry;

        entry.val = xa_to_value(arg);
        return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
        return xa_mk_value(entry.val);
}

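/*
 * Background sketch (editorial): xa_mk_value() tags the value so the
 * XArray can tell it apart from a page pointer, which is why the packed
 * entry must fit in BITS_PER_XA_VALUE bits.  shmem, for example, stores a
 * swapped-out page's entry in the mapping this way (illustrative):
 *
 *      void *slot = swp_to_radix_entry(entry);
 *      ...store slot in the address_space...
 *      entry = radix_to_swp_entry(slot);
 */
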
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
                         page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        int type = swp_type(entry);

        return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return pfn_to_page(swp_offset(entry));
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */

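/*
 * Usage sketch (illustrative, loosely following do_swap_page()): a
 * non-present pte can hold a device-private entry instead of a real swap
 * slot, in which case the page lives in device memory and the fault is
 * handed back to the owning driver:
 *
 *      swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *      if (is_device_private_entry(entry)) {
 *              struct page *page = device_private_entry_to_page(entry);
 *              ...ask page->pgmap->ops to migrate it back to RAM...
 *      }
 */
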
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(compound_head(page)));

        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));

        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(compound_head(p)));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

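/*
 * Editorial summary of the wait protocol, not new behaviour: a thread
 * that faults on a migration entry sleeps on the lock of the page being
 * migrated; the helpers below re-validate the pte under the page table
 * lock before sleeping.
 */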
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte);
#else /* CONFIG_MIGRATION */

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}

#endif /* CONFIG_MIGRATION */

 243
 244struct page_vma_mapped_walk;
 245
 246#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 247extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 248                struct page *page);
 249
 250extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
 251                struct page *new);
 252
 253extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
 254
 255static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 256{
 257        swp_entry_t arch_entry;
 258
 259        if (pmd_swp_soft_dirty(pmd))
 260                pmd = pmd_swp_clear_soft_dirty(pmd);
 261        arch_entry = __pmd_to_swp_entry(pmd);
 262        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 263}
 264
 265static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 266{
 267        swp_entry_t arch_entry;
 268
 269        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
 270        return __swp_entry_to_pmd(arch_entry);
 271}
 272
 273static inline int is_pmd_migration_entry(pmd_t pmd)
 274{
 275        return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
 276}
 277#else
 278static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 279                struct page *page)
 280{
 281        BUILD_BUG();
 282}
 283
 284static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
 285                struct page *new)
 286{
 287        BUILD_BUG();
 288}
 289
 290static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
 291
 292static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 293{
 294        return swp_entry(0, 0);
 295}
 296
 297static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 298{
 299        return __pmd(0);
 300}
 301
 302static inline int is_pmd_migration_entry(pmd_t pmd)
 303{
 304        return 0;
 305}
 306#endif
 307
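/*
 * Typical caller pattern (sketch, simplified; e.g. a page-table walk that
 * drops the pmd lock before waiting):
 *
 *      if (is_pmd_migration_entry(*pmd))
 *              pmd_migration_entry_wait(mm, pmd);
 */
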
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_HWPOISON;
}

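/*
 * Illustrative use (simplified): memory_failure() replaces a poisoned
 * page's mappings with hwpoison entries, so a later fault detects the
 * poison instead of touching bad memory:
 *
 *      swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *      if (is_hwpoison_entry(entry))
 *              return VM_FAULT_HWPOISON;
 */
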
static inline void num_poisoned_pages_inc(void)
{
        atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}

#else /* CONFIG_MEMORY_FAILURE */

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
        return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_dec(void)
{
        /* stub mirroring num_poisoned_pages_inc(); no counter here */
}
#endif /* CONFIG_MEMORY_FAILURE */

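/*
 * Editorial summary (assumes the definitions in <linux/swap.h>): the
 * special types above (SWP_MIGRATION_*, SWP_HWPOISON, SWP_DEVICE_*) are
 * carved out of the top of the 5-bit type space, above MAX_SWAPFILES, so
 * a single comparison distinguishes them from real swap entries:
 *
 *      if (non_swap_entry(entry))
 *              ...migration/hwpoison/device-private, not a swap slot...
 */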
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
    defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
        return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
        return 0;
}
#endif

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */