linux/include/linux/migrate.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS             0

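/*
 * Example: an address_space_operations.migratepage() implementation must
 * return either a negative errno or MIGRATEPAGE_SUCCESS. Filesystems with
 * no special page state often just wire up the generic helper declared
 * below (a minimal sketch; my_aops is an illustrative name, not part of
 * this header):
 *
 *        static const struct address_space_operations my_aops = {
 *                ...
 *                .migratepage    = migrate_page,
 *        };
 */
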
enum migrate_reason {
        MR_COMPACTION,
        MR_MEMORY_FAILURE,
        MR_MEMORY_HOTPLUG,
        MR_SYSCALL,             /* also applies to cpusets */
        MR_MEMPOLICY_MBIND,
        MR_NUMA_MISPLACED,
        MR_CONTIG_RANGE,
        MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

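/*
 * Allocate a page "like" @page as a migration target: preserve the hugetlb
 * hstate or THP order of the source, and allow highmem placement only when
 * the source page is highmem or sits in ZONE_MOVABLE.
 */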
static inline struct page *new_page_nodemask(struct page *page,
                                int preferred_nid, nodemask_t *nodemask)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
        unsigned int order = 0;
        struct page *new_page = NULL;

        if (PageHuge(page))
                return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
                                preferred_nid, nodemask);

        if (PageTransHuge(page)) {
                gfp_mask |= GFP_TRANSHUGE;
                order = HPAGE_PMD_ORDER;
        }

        if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;

        new_page = __alloc_pages_nodemask(gfp_mask, order,
                                preferred_nid, nodemask);

        if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);

        return new_page;
}
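
/*
 * Example: using new_page_nodemask() as the allocation policy behind a
 * new_page_t callback. A minimal sketch, assuming the caller encodes the
 * preferred node id in @private; the wrapper name is illustrative, not
 * part of this header:
 *
 *        static struct page *alloc_migration_target(struct page *page,
 *                                                   unsigned long private)
 *        {
 *                return new_page_nodemask(page, (int)private,
 *                                         &node_states[N_MEMORY]);
 *        }
 */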

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode,
                int extra_count);
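
/*
 * Example: the typical migrate_pages() calling pattern. A minimal sketch,
 * assuming the caller has already isolated pages onto @pagelist (e.g. with
 * isolate_lru_page()) and reusing the illustrative alloc_migration_target()
 * wrapper shown earlier; target_nid is the caller's chosen destination node:
 *
 *        LIST_HEAD(pagelist);
 *        int err;
 *
 *        ... isolate pages onto &pagelist ...
 *
 *        err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *                            target_nid, MIGRATE_SYNC, MR_SYSCALL);
 *        if (err)
 *                putback_movable_pages(&pagelist);
 *
 * migrate_pages() returns the number of pages that could not be migrated,
 * or a negative errno, so any nonzero return leaves pages to put back;
 * MIGRATE_SYNC comes from <linux/migrate_mode.h>.
 */
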
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
                free_page_t free, unsigned long private, enum migrate_mode mode,
                int reason)
        { return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
        { return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
                                     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page)
{
        return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
                                struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
                                  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
        return false;
}
static inline int migrate_misplaced_page(struct page *page,
                                         struct vm_area_struct *vma, int node)
{
        return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, pmd_t entry,
                        unsigned long address,
                        struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, pmd_t entry,
                        unsigned long address,
                        struct page *page, int node)
{
        return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags below. So
 * far there is enough room for all our flags.
 */
#define MIGRATE_PFN_VALID       (1UL << 0)
#define MIGRATE_PFN_MIGRATE     (1UL << 1)
#define MIGRATE_PFN_LOCKED      (1UL << 2)
#define MIGRATE_PFN_WRITE       (1UL << 3)
#define MIGRATE_PFN_DEVICE      (1UL << 4)
#define MIGRATE_PFN_ERROR       (1UL << 5)
#define MIGRATE_PFN_SHIFT       6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
        if (!(mpfn & MIGRATE_PFN_VALID))
                return NULL;
        return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
        return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
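
/*
 * Example: how the two helpers round-trip. A minimal sketch; @page stands
 * for any struct page the caller already holds:
 *
 *        unsigned long mpfn;
 *
 *        mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
 *        ...
 *        VM_BUG_ON(migrate_pfn_to_page(mpfn) != page);
 *
 * Flag bits (MIGRATE_PFN_MIGRATE above) live below MIGRATE_PFN_SHIFT, so
 * the right shift in migrate_pfn_to_page() discards them and recovers the
 * original pfn.
 */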

/*
 * struct migrate_vma_ops - migrate operation callback
 *
 * @alloc_and_copy: alloc destination memory and copy source memory to it
 * @finalize_and_map: allow caller to map the successfully migrated pages
 *
 * The alloc_and_copy() callback happens once all source pages have been
 * locked, unmapped and checked (to see whether they are pinned). All pages
 * that can be migrated will have an entry in the src array set with the pfn
 * value of the page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set (other flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (ie with the MIGRATE_PFN_VALID
 * and MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
 * callback must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with the MIGRATE_PFN_VALID and
 * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
 * locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory (ie the MIGRATE_PFN_DEVICE
 * flag is also set in the src array entry). If the device driver cannot
 * migrate a device page back to system memory, then it must set the
 * corresponding dst array entry to MIGRATE_PFN_ERROR. This will trigger a
 * SIGBUS if the CPU tries to access any of the virtual addresses originally
 * backed by this page. Because a SIGBUS is such a severe result for the
 * userspace process, the device driver should avoid setting MIGRATE_PFN_ERROR
 * unless it is really in an unrecoverable state.
 *
 * For empty entries in the CPU page table (pte_none() or pmd_none() is true)
 * we do set the MIGRATE_PFN_MIGRATE flag in the corresponding src array
 * entry, thus allowing the device driver to allocate device memory for those
 * unbacked virtual addresses. For these the device driver simply has to
 * allocate device memory and properly set the destination entry like for
 * regular migration. Note that this can still fail, and thus the device
 * driver must check, inside the finalize_and_map() callback, whether the
 * migration was successful for those entries, just like for regular
 * migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN !
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for
 * the memory allocated by the alloc_and_copy() callback). Migration can fail,
 * and thus the finalize_and_map() callback allows the driver to inspect which
 * pages were successfully migrated, and which were not. Successfully migrated
 * pages will have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning up and updating its page table (if it
 * chose to do so; this is not an obligation), it returns. At this point, the
 * HMM core will finish up the final steps, and the migration is complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
 * ENTRIES OR BAD THINGS WILL HAPPEN !
 *
 * (An illustrative caller sketch follows the migrate_vma() declarations
 * below.)
 */
struct migrate_vma_ops {
        void (*alloc_and_copy)(struct vm_area_struct *vma,
                               const unsigned long *src,
                               unsigned long *dst,
                               unsigned long start,
                               unsigned long end,
                               void *private);
        void (*finalize_and_map)(struct vm_area_struct *vma,
                                 const unsigned long *src,
                                 const unsigned long *dst,
                                 unsigned long start,
                                 unsigned long end,
                                 void *private);
};

#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
                struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end,
                unsigned long *src,
                unsigned long *dst,
                void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
                              struct vm_area_struct *vma,
                              unsigned long start,
                              unsigned long end,
                              unsigned long *src,
                              unsigned long *dst,
                              void *private)
{
        return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */

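/*
 * Example: the overall shape of a migrate_vma() caller. A minimal sketch
 * under stated assumptions: the my_* names are hypothetical (not part of
 * this header or of any driver API), the destination is ordinary system
 * memory rather than device memory, and error handling is reduced to
 * skipping pages (an unset dst entry simply means that page is not
 * migrated):
 *
 *        static void my_alloc_and_copy(struct vm_area_struct *vma,
 *                                      const unsigned long *src,
 *                                      unsigned long *dst,
 *                                      unsigned long start,
 *                                      unsigned long end,
 *                                      void *private)
 *        {
 *                unsigned long addr, i;
 *
 *                for (addr = start, i = 0; addr < end;
 *                     addr += PAGE_SIZE, i++) {
 *                        struct page *spage = migrate_pfn_to_page(src[i]);
 *                        struct page *dpage;
 *
 *                        if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *                                continue;
 *                        dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
 *                                               vma, addr);
 *                        if (!dpage)
 *                                continue;
 *                        lock_page(dpage);
 *                        if (spage)
 *                                copy_highpage(dpage, spage);
 *                        else
 *                                clear_highpage(dpage);
 *                        dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *                                 MIGRATE_PFN_LOCKED;
 *                }
 *        }
 *
 *        static void my_finalize_and_map(struct vm_area_struct *vma,
 *                                        const unsigned long *src,
 *                                        const unsigned long *dst,
 *                                        unsigned long start,
 *                                        unsigned long end,
 *                                        void *private)
 *        {
 *                // Check src[i] & MIGRATE_PFN_MIGRATE to learn which
 *                // pages actually migrated; update driver state here.
 *        }
 *
 *        static const struct migrate_vma_ops my_migrate_ops = {
 *                .alloc_and_copy         = my_alloc_and_copy,
 *                .finalize_and_map       = my_finalize_and_map,
 *        };
 *
 * The caller sizes the src and dst arrays to one entry per page in
 * [start, end) and, with mmap_sem held for read, invokes:
 *
 *        ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst, NULL);
 */
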
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */