linux/include/linux/mempolicy.h
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single
 * 'int' argument.  The MPOL_MODE_FLAGS macro determines the legal set of
 * optional mode flags.
 */

/* Policies */
enum {
        MPOL_DEFAULT,
        MPOL_PREFERRED,
        MPOL_BIND,
        MPOL_INTERLEAVE,
        MPOL_MAX,       /* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES     (1 << 15)
#define MPOL_F_RELATIVE_NODES   (1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
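
/*
 * Illustrative sketch (not part of this header): from userspace, a mode
 * and its optional mode flags are OR'ed into the same 'int', e.g.
 *
 *      set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *                    nodemask, maxnode);
 *
 * where 'nodemask' and 'maxnode' describe whatever node set the caller
 * built; those two names are placeholders, not kernel symbols.
 */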

/* Flags for get_mempolicy */
#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT  (1<<0)  /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE    (1<<1)  /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0) /* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1) /* preferred local allocation */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
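
/*
 * A descriptive sketch of how the unions are conventionally used (based
 * on mm/mempolicy.c; see there for the authoritative logic): MPOL_PREFERRED
 * keeps its target in v.preferred_node, while MPOL_BIND and MPOL_INTERLEAVE
 * keep their node set in v.nodes.  The w union remembers what a later
 * rebind needs: user_nodemask when MPOL_F_STATIC_NODES or
 * MPOL_F_RELATIVE_NODES was given, cpuset_mems_allowed otherwise.
 */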

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
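
/*
 * A minimal sketch of the conditional-reference pattern (illustrative;
 * it assumes 'pol' came from a lookup that takes a reference only on
 * MPOL_F_SHARED policies, such as mpol_shared_policy_lookup() below):
 *
 *      pol = mpol_shared_policy_lookup(sp, idx);
 *      ... allocate pages according to pol ...
 *      mpol_cond_put(pol);     drops the ref only if MPOL_F_SHARED is set
 */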

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
                                          struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
                                                struct mempolicy *frompol)
{
        if (!frompol)
                return frompol;
        return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
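
/*
 * Typical lifecycle under the duplication rules documented above (a
 * minimal sketch, not a real kernel code path; it assumes __mpol_dup()
 * reports allocation failure via ERR_PTR()):
 *
 *      struct mempolicy *pol = mpol_dup(current->mempolicy);
 *      if (!IS_ERR(pol)) {
 *              ... use pol for allocations ...
 *              mpol_put(pol);  drop the reference mpol_dup() returned
 *      }
 */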

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
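
/*
 * The tree is indexed in pages, so a caller converts a file offset to a
 * page index before looking up.  An illustrative sketch (the arithmetic
 * mirrors a shmem-style get_policy; 'info' is a placeholder for whatever
 * object embeds the shared_policy):
 *
 *      unsigned long idx = vma->vm_pgoff +
 *                      ((addr - vma->vm_start) >> PAGE_SHIFT);
 *      struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *      ... use pol, then mpol_cond_put(pol) ...
 */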

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                        int no_context);
#endif

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
                return 0;
        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                        return 0;
        return 1;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
                                                struct mempolicy *from)
{
        return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
                        const nodemask_t *from_nodes,
                        const nodemask_t *to_nodes, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
                                int no_context)
{
        return 1;       /* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                                int no_context)
{
        return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif