linux/include/linux/swapops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)       ((sizeof(e.val) * 8) - \
                        (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)      ((1UL << SWP_TYPE_SHIFT(e)) - 1)
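/*
 * Worked example (illustrative only): with MAX_SWAPFILES_SHIFT == 5 and
 * RADIX_TREE_EXCEPTIONAL_SHIFT == 2, a 64-bit swp_entry_t has
 * SWP_TYPE_SHIFT(e) == 64 - 7 == 57, so the type occupies bits 57..63 and
 * SWP_OFFSET_MASK(e) == (1UL << 57) - 1 keeps the offset in bits 0..56.
 */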

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT(ret)) |
                        (offset & SWP_OFFSET_MASK(ret));
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK(entry);
}
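/*
 * Example (a minimal sketch of how the helpers compose): packing a
 * type/offset pair and reading it back is lossless as long as the offset
 * fits under SWP_OFFSET_MASK():
 *
 *      swp_entry_t e = swp_entry(1, 0x1234);
 *      swp_type(e);    -> 1
 *      swp_offset(e);  -> 0x1234
 */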

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pte(arch_entry);
}
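/*
 * Typical usage (a sketch, assuming the caller follows the generic fault
 * path conventions): a pte that is neither none nor present is decoded
 * before deciding how to handle the fault:
 *
 *      if (is_swap_pte(pte)) {
 *              swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *              if (non_swap_entry(entry))
 *                      ... migration / device / hwpoison handling ...
 *      }
 */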

/*
 * Convert between an arch-independent swp_entry_t and the exceptional radix
 * tree entry format that shmem/tmpfs uses to keep swap entries in the page
 * cache.
 */
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
        swp_entry_t entry;

        entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
        return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
        unsigned long value;

        value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
        return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
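/*
 * Sketch (illustrative): the conversion round-trips, and the stored value is
 * recognised as an exceptional entry rather than a struct page pointer:
 *
 *      void *slot = swp_to_radix_entry(entry);
 *      radix_tree_exceptional_entry(slot);        -> true
 *      radix_to_swp_entry(slot).val == entry.val; -> true
 */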

/*
 * Unaddressable device memory (ZONE_DEVICE private pages) is mapped with
 * special SWP_DEVICE_{READ,WRITE} entries; a CPU fault on such an entry is
 * forwarded to the owning device driver via device_private_entry_fault().
 */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
                         page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        int type = swp_type(entry);
        return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return pfn_to_page(swp_offset(entry));
}

int device_private_entry_fault(struct vm_area_struct *vma,
                       unsigned long addr,
                       swp_entry_t entry,
                       unsigned int flags,
                       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
                                     unsigned long addr,
                                     swp_entry_t entry,
                                     unsigned int flags,
                                     pmd_t *pmdp)
{
        return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */
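/*
 * Sketch (illustrative, mirroring the generic swap fault path): when a pte
 * decodes to a device private entry, the fault is handed to the driver:
 *
 *      entry = pte_to_swp_entry(pte);
 *      if (is_device_private_entry(entry))
 *              ret = device_private_entry_fault(vma, addr, entry,
 *                                               flags, pmdp);
 */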

/*
 * Page migration temporarily replaces the ptes of a page under migration
 * with SWP_MIGRATION_{READ,WRITE} entries; anyone who faults on such an
 * entry waits in migration_entry_wait() until the migration has finished.
 */
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(compound_head(page)));

        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));
        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(compound_head(p)));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte);
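/*
 * Sketch (illustrative, following the generic fault path): a thread that
 * faults on a migration entry waits for the migration to finish and then
 * retries the fault:
 *
 *      entry = pte_to_swp_entry(pte);
 *      if (is_migration_entry(entry)) {
 *              migration_entry_wait(mm, pmd, address);
 *              return 0;       (the fault is retried afterwards)
 *      }
 */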
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}

#endif

struct page_vma_mapped_walk;

/*
 * PMD-level migration entries are used when the architecture can migrate a
 * transparent huge page without first splitting it.
 */
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        swp_entry_t arch_entry;

        if (pmd_swp_soft_dirty(pmd))
                pmd = pmd_swp_clear_soft_dirty(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
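/*
 * Sketch (illustrative): the pmd-level helpers mirror the pte-level ones, so
 * a huge-page fault path can detect and wait on a THP under migration with
 *
 *      if (is_pmd_migration_entry(*pmd))
 *              pmd_migration_entry_wait(mm, pmd);
 */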
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page)
{
        BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new)
{
        BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
        return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
        atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
        return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
        return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

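/*
 * Explanatory note: the software-defined entry types above (SWP_MIGRATION_*,
 * SWP_DEVICE_*, SWP_HWPOISON) are all allocated past MAX_SWAPFILES in
 * <linux/swap.h>, so a single comparison against MAX_SWAPFILES distinguishes
 * them from entries that refer to a real swap device.
 */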
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
        return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
        return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */