linux/include/linux/migrate.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success.
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA,
	MR_TYPES
};
/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (thp_migration_supported() && PageTransHuge(page)) {
		order = HPAGE_PMD_ORDER;
		gfp_mask |= GFP_TRANSHUGE;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
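
/*
 * Example (illustrative sketch, not part of this header): a new_page_t
 * allocation callback can often just delegate to new_page_nodemask().
 * The wrapper name below is hypothetical:
 *
 *	static struct page *my_new_page(struct page *page,
 *					unsigned long private, int **reason)
 *	{
 *		// Prefer the current node; NULL nodemask allows any node.
 *		return new_page_nodemask(page, numa_node_id(), NULL);
 *	}
 */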

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */
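
/*
 * Example (illustrative sketch): the usual migrate_pages() calling
 * convention, assuming pages were already isolated onto @pagelist (e.g.
 * with isolate_lru_page()) and an allocation callback like the
 * hypothetical my_new_page() above:
 *
 *	int err;
 *
 *	migrate_prep();
 *	err = migrate_pages(&pagelist, my_new_page, NULL, 0,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 *
 * migrate_pages() returns the number of pages that could not be migrated
 * (or a negative errno), and the caller is responsible for putting back
 * whatever is still on the list.
 */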

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
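
/*
 * Example (illustrative sketch): a driver publishing a non-LRU page as
 * movable. The page must be locked, and @my_mapping is a hypothetical
 * address_space whose a_ops implement ->isolate_page, ->migratepage and
 * ->putback_page:
 *
 *	lock_page(page);
 *	__SetPageMovable(page, my_mapping);
 *	unlock_page(page);
 */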

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures: an unsigned long there might not have
 * enough bits to store all the physical address bits plus the flags below.
 * So far there is enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
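
/*
 * Example (illustrative): encoding a pfn for a migrate_vma src/dst array
 * entry and decoding it back:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_LOCKED;
 *	struct page *p = migrate_pfn_to_page(mpfn);	// p == page
 */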

/*
 * struct migrate_vma_ops - migrate operation callbacks
 *
 * @alloc_and_copy: allocate destination memory and copy source memory to it
 * @finalize_and_map: allow the caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been
 * locked, unmapped and checked (checked whether pinned or not). All pages
 * that can be migrated will have an entry in the src array set with the pfn
 * value of the page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set (other flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and
 * copy source memory to it for all those entries (i.e. with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set). Once these are
 * allocated and copied, the callback must update each corresponding entry
 * in the dst array with the pfn value of the destination page and with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set (destination pages
 * must have their struct pages locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this is
 * a migration from device memory to system memory (i.e. the
 * MIGRATE_PFN_DEVICE flag is also set in the src array entry). If the
 * device driver cannot migrate a device page back to system memory, then
 * it must set the corresponding dst array entry to MIGRATE_PFN_ERROR. This
 * will trigger a SIGBUS if the CPU tries to access any of the virtual
 * addresses originally backed by this page. Because a SIGBUS is such a
 * severe result for the userspace process, the device driver should avoid
 * setting MIGRATE_PFN_ERROR unless it is really in an unrecoverable state.
 *
 * For empty entries in the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding src
 * array entry, thus allowing the device driver to allocate device memory
 * for those unbacked virtual addresses. For this the device driver simply
 * has to allocate device memory and properly set the destination entry,
 * just as for a regular migration. Note that this can still fail, so the
 * device driver must check in the finalize_and_map() callback whether the
 * migration was successful for those entries, just like for a regular
 * migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY
 * ENTRIES OR BAD THINGS WILL HAPPEN!
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for
 * the memory allocated by the alloc_and_copy() callback). Migration can
 * fail, and thus finalize_and_map() allows the driver to inspect which
 * pages were successfully migrated and which were not. Successfully
 * migrated pages will have the MIGRATE_PFN_MIGRATE flag set in their src
 * array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source
 * pages are still locked, and the mmap_sem is held in read mode (hence no
 * one can unmap the range being migrated).
 *
 * Once the callback is done cleaning up and updating its page table (if it
 * chose to do so; this is not an obligation), it returns. At this point,
 * the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST
 * ARRAY ENTRIES OR BAD THINGS WILL HAPPEN!
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
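
/*
 * Example (illustrative sketch of the protocol documented above): an
 * alloc_and_copy() callback that migrates into ordinary system pages.
 * A real driver would allocate and copy with its own mechanisms; the
 * function name is hypothetical:
 *
 *	static void my_alloc_and_copy(struct vm_area_struct *vma,
 *				      const unsigned long *src,
 *				      unsigned long *dst,
 *				      unsigned long start,
 *				      unsigned long end,
 *				      void *private)
 *	{
 *		unsigned long addr;
 *		unsigned long i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 *			if (!dpage)
 *				continue;	// entry left alone, not migrated
 *			lock_page(dpage);
 *			if (spage)	// spage is NULL for pte_none() entries
 *				copy_highpage(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 */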

#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */
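
/*
 * Example (illustrative): invoking migrate_vma() over one page-aligned
 * range with mmap_sem held for read. The src/dst arrays must have one
 * entry per page in [start, end); the ops and the private pointer are
 * the hypothetical ones from the sketch above:
 *
 *	static const struct migrate_vma_ops my_ops = {
 *		.alloc_and_copy		= my_alloc_and_copy,
 *		.finalize_and_map	= my_finalize_and_map,
 *	};
 *	unsigned long src[64], dst[64];
 *	int ret;
 *
 *	ret = migrate_vma(&my_ops, vma, start, start + 64 * PAGE_SIZE,
 *			  src, dst, my_private);
 */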

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */