linux/include/linux/migrate.h
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);
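
/*
 * Example (editor's sketch, not part of the kernel API): a matching
 * new_page_t/free_page_t pair of the kind a caller hands to
 * migrate_pages() below.  Here 'private' carries the target node id;
 * both function names are hypothetical.
 */
static inline struct page *example_alloc_target(struct page *page,
						unsigned long private,
						int **result)
{
	/* Allocate the destination page on the requested node. */
	return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static inline void example_free_target(struct page *page, unsigned long private)
{
	/* Release an unused destination page when migration to it failed. */
	__free_page(page);
}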

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces this special case, where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in that case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
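
/*
 * Example (editor's sketch, not part of the kernel API): how a caller in
 * unmap_and_move()'s position can test the codes above.  'rc' would be
 * the value returned by address_space_operations.migratepage(); the
 * helper name is hypothetical.
 */
static inline bool example_migration_succeeded(int rc)
{
	/* Balloon pages report success with the distinct return code. */
	return rc == MIGRATEPAGE_SUCCESS || rc == MIGRATEPAGE_BALLOON_SUCCESS;
}
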
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
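
/*
 * Example (editor's sketch, not part of the kernel API): draining a list
 * of isolated pages with migrate_pages().  Pages that could not be
 * migrated stay on the list and must be handed back via
 * putback_movable_pages().  The callbacks are the hypothetical ones
 * sketched near the top of this header.
 */
static inline int example_migrate_list(struct list_head *pagelist, int nid)
{
	int err = 0;

	if (!list_empty(pagelist)) {
		err = migrate_pages(pagelist, example_alloc_target,
				    example_free_target, (unsigned long)nid,
				    MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(pagelist);
	}
	return err;
}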

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
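
/*
 * Example (editor's sketch): the skeleton that the generic migrate_page()
 * helper itself follows - move the mapping's radix-tree slot first, then
 * copy contents and flags.  A filesystem with private page state would
 * add its own transfer step between the two calls.
 */
static inline int example_simple_migratepage(struct address_space *mapping,
					     struct page *newpage,
					     struct page *page,
					     enum migrate_mode mode)
{
	int rc;

	/* Takes care of page counts, the mapping slot and dirty state. */
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Transfer contents and page flags to the new page. */
	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
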
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
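
/*
 * Example (editor's sketch, not part of the kernel API): the shape of a
 * NUMA hinting fault handler's use of migrate_misplaced_page().  A
 * nonzero return means the page now lives on 'target_nid'; otherwise the
 * fault is serviced wherever the page currently resides.  The function
 * name is hypothetical.
 */
static inline int example_numa_fault_node(struct page *page,
					  struct vm_area_struct *vma,
					  int target_nid)
{
	/* migrate_misplaced_page() consumes the caller's page reference. */
	if (migrate_misplaced_page(page, vma, target_nid))
		return target_nid;	/* page was moved */
	return -1;			/* rate-limited or failed to isolate */
}
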
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */