linux/include/linux/mempolicy.h
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be associated either with a process or with a VMA.
 * For VMA-related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
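
/*
 * Illustrative sketch (not part of this header's API; the helper name
 * is hypothetical): which member of the "v" union is valid depends on
 * "mode". MPOL_INTERLEAVE and MPOL_BIND keep a set of nodes in
 * v.nodes, MPOL_PREFERRED keeps a single node in v.preferred_node,
 * and MPOL_DEFAULT leaves "v" undefined.
 *
 *      static bool mpol_uses_nodemask(struct mempolicy *pol)
 *      {
 *              return pol->mode == MPOL_INTERLEAVE ||
 *                     pol->mode == MPOL_BIND;
 *      }
 */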

/*
 * Support for managing mempolicy data objects (clone, copy, destroy).
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}
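
/*
 * Example (illustrative sketch): a typical reference lifecycle pairs
 * mpol_dup() with mpol_put(). Note that __mpol_dup() reports allocation
 * failure via ERR_PTR() rather than NULL, so callers check IS_ERR():
 *
 *      struct mempolicy *pol = mpol_dup(current->mempolicy);
 *      if (IS_ERR(pol))
 *              return PTR_ERR(pol);
 *      ... use pol ...
 *      mpol_put(pol);          drops the reference mpol_dup() gave us
 */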

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
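
/*
 * Example (illustrative sketch): the lookup/conditional-put pattern.
 * Shared (tmpfs/shmem) policies carry MPOL_F_SHARED and are returned
 * from lookups with an extra reference held; task and plain VMA
 * policies are not. mpol_cond_put() drops the reference only in the
 * former case, so one cleanup path serves both:
 *
 *      struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *      ... pick a node or zonelist based on pol ...
 *      mpol_cond_put(pol);
 */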

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}
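
/*
 * Example (illustrative): mpol_equal() is the interchangeability test
 * used, e.g., when deciding whether two adjacent mappings may be
 * merged:
 *
 *      if (mpol_equal(vma_policy(a), vma_policy(b)))
 *              ... the two VMAs agree on NUMA placement ...
 */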

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages,
 * not bytes, so that we can work with shared memory segments larger than
 * an unsigned long can index in bytes.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
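
/*
 * Example (illustrative sketch): because the tree is indexed in pages,
 * callers convert a faulting address to a page offset first, e.g. with
 * linear_page_index() from <linux/pagemap.h>:
 *
 *      pgoff_t idx = linear_page_index(vma, addr);
 *      struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *      ... the result is a shared policy returned with a reference,
 *      so finish with mpol_cond_put(pol) ...
 */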

struct mempolicy *get_vma_policy(struct task_struct *tsk,
                struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                                enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return 0;
        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                return 0;
        return 1;
}
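
/*
 * Example (illustrative): address-space walks that queue pages for
 * migration skip areas that cannot be migrated:
 *
 *      if (!vma_migratable(vma))
 *              continue;
 */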

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else /* !CONFIG_NUMA */

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new,
                                enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                        const nodemask_t *mask)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
        return 0;
}

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif /* _LINUX_MEMPOLICY_H */