linux/include/linux/mempolicy.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
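
/*
 * Illustrative sketch (not part of the original header): which member of
 * the 'v' union is meaningful depends on 'mode'.  A hypothetical helper
 * picking a starting node might read it like this (flag interactions such
 * as MPOL_F_LOCAL are ignored for brevity):
 *
 *	static int example_policy_node(struct mempolicy *pol)
 *	{
 *		switch (pol->mode) {
 *		case MPOL_PREFERRED:
 *			return pol->v.preferred_node;
 *		case MPOL_BIND:
 *		case MPOL_INTERLEAVE:
 *			return first_node(pol->v.nodes);
 *		default:
 *			return NUMA_NO_NODE;
 *		}
 *	}
 */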

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
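
/*
 * Illustrative sketch: lookups that can return a shared (MPOL_F_SHARED)
 * policy take a reference on it, so callers pair the lookup with
 * mpol_cond_put(), which only drops the count when that is actually
 * needed.  A hypothetical caller:
 *
 *	pol = __get_vma_policy(vma, addr);
 *	... allocate pages according to pol ...
 *	mpol_cond_put(pol);
 */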

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
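
/*
 * Illustrative sketch: mpol_dup() is for when one object's lifetime must
 * not depend on another's, e.g. when copying a VMA.  __mpol_dup() reports
 * failure with ERR_PTR(), not NULL, so a hypothetical caller checks:
 *
 *	struct mempolicy *new = mpol_dup(vma_policy(old_vma));
 *
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	new_vma->vm_policy = new;
 */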

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}
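
/*
 * Illustrative sketch: mpol_equal() is the mempolicy side of deciding
 * whether two adjacent VMAs are compatible, e.g. a hypothetical check:
 *
 *	if (mpol_equal(vma_policy(a), vma_policy(b)))
 *		... the two ranges are mergeable policy-wise ...
 */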

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
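
/*
 * Illustrative sketch: because the tree is indexed in pages, a lookup for
 * an address in a shared mapping uses the page offset rather than a byte
 * offset.  A hypothetical caller, in the style of shmem ('info' holding
 * the shared_policy is an assumption here):
 *
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *
 *	... use pol; it carries MPOL_F_SHARED, so release it with
 *	mpol_cond_put(pol) ...
 */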

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
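
/*
 * Illustrative sketch: huge_node() resolves the applicable policy for a
 * hugetlb allocation into a node id plus an optional nodemask, and hands
 * the policy back through *mpol so the caller can drop a possible shared
 * reference afterwards.  A hypothetical caller:
 *
 *	nid = huge_node(vma, addr, gfp_mask, &pol, &nodemask);
 *	... allocate a huge page from nid, constrained by nodemask ...
 *	mpol_cond_put(pol);
 */
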
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);
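
/*
 * Illustrative sketch: moving all pages of an mm from node 0 to node 1
 * (hypothetical caller; nodemask_of_node() is from <linux/nodemask.h>):
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(1);
 *	int err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */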


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
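
/*
 * Illustrative sketch: mpol_parse_str() and mpol_to_str() speak the same
 * format as tmpfs's mpol= mount option, e.g. "interleave:0-3" or
 * "prefer:1".  A hypothetical round trip (mpol_parse_str() modifies the
 * string in place, so it must be writable, and returns 0 on success):
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *pol;
 *
 *	if (!mpol_parse_str(str, &pol)) {
 *		mpol_to_str(buf, sizeof(buf), pol);
 *		mpol_put(pol);
 *	}
 */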

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

        /*
         * DAX device mappings require predictable access latency, so avoid
         * incurring periodic faults.
         */
        if (vma_is_dax(vma))
                return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & VM_HUGETLB)
                return false;
#endif

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                        return false;
        return true;
}
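
/*
 * Illustrative sketch: migration paths check this before trying to queue
 * a VMA's pages for movement, e.g. a hypothetical range walker:
 *
 *	if (!vma_migratable(vma))
 *		continue;	(skip: these pages cannot be migrated)
 */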

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif