linux/include/linux/mempolicy.h
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
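
/*
 * Which union member is valid depends on the mode.  A minimal sketch,
 * illustrative only ("pol", "nid" and "nodes" are hypothetical locals):
 *
 *      switch (pol->mode) {
 *      case MPOL_PREFERRED:
 *              nid = pol->v.preferred_node;    // single preferred node
 *              break;
 *      case MPOL_INTERLEAVE:
 *      case MPOL_BIND:
 *              nodes = pol->v.nodes;           // set of allowed nodes
 *              break;
 *      default:
 *              break;                          // MPOL_DEFAULT: v is undefined
 *      }
 */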

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
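
/*
 * Typical pattern, as a sketch: a policy obtained with an extra
 * reference (e.g. from the shared-policy lookup declared below) is
 * dropped conditionally once the allocation is done ("sp" and "idx"
 * are illustrative):
 *
 *      struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *      ...allocate according to pol...
 *      mpol_cond_put(pol);     // unrefs only if MPOL_F_SHARED is set
 */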

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
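
/*
 * Lifecycle sketch (assumes "p" is a task with a NUMA policy; on
 * allocation failure __mpol_dup() returns an ERR_PTR):
 *
 *      struct mempolicy *pol = mpol_dup(p->mempolicy);
 *      if (IS_ERR(pol))
 *              return PTR_ERR(pol);
 *      ...use pol...
 *      mpol_put(pol);          // drop the reference mpol_dup() took
 */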

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
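
/*
 * Since the tree is indexed in pages, lookups take a page index
 * (pgoff), not a byte offset.  Sketch for a mapped file (names are
 * illustrative):
 *
 *      pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *      struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *      ...
 *      mpol_cond_put(pol);
 */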

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                                enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);
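
/*
 * Sketch: move the pages of "mm" from node 0 to node 1 (assumes both
 * nodes are online; MPOL_MF_* flags come from the uapi header).  The
 * return value is the number of pages that could not be moved, or a
 * negative errno:
 *
 *      nodemask_t from = nodemask_of_node(0);
 *      nodemask_t to = nodemask_of_node(1);
 *      int ret = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */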

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

        /*
         * DAX device mappings require predictable access latency, so avoid
         * incurring periodic faults.
         */
        if (vma_is_dax(vma))
                return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & VM_HUGETLB)
                return false;
#endif

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
            gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone)
                return false;
        return true;
}
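
/*
 * Sketch: callers typically skip non-migratable mappings while walking
 * an address space (illustrative; assumes mmap_sem is held for read):
 *
 *      for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *              if (!vma_migratable(vma))
 *                      continue;
 *              ...collect this vma's pages for migration...
 *      }
 */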

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new,
                                enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif /* _LINUX_MEMPOLICY_H */