linux/include/linux/mempolicy.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode; 	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short 		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
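
/*
 * Example (not part of the original header): a minimal sketch showing
 * which member of the 'v' union is meaningful for each mode.  The
 * helper name and the pr_info() output format are illustrative
 * assumptions, not kernel API.
 */
static inline void mpol_describe_example(const struct mempolicy *pol)
{
	switch (pol->mode) {
	case MPOL_PREFERRED:
		/* v.preferred_node holds the single preferred node */
		pr_info("preferred node %d\n", pol->v.preferred_node);
		break;
	case MPOL_INTERLEAVE:
	case MPOL_BIND:
		/* v.nodes holds the interleave/bind nodemask */
		pr_info("first node %d\n", first_node(pol->v.nodes));
		break;
	default:
		/* MPOL_DEFAULT: v is undefined */
		break;
	}
}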

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
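
/*
 * Example (not part of the original header): the reference-counting
 * lifecycle implied by mpol_dup()/mpol_get()/mpol_put().  Assumes the
 * ERR_PTR error convention used by __mpol_dup() in mm/mempolicy.c;
 * the function name is illustrative.
 */
static inline int mpol_lifecycle_example(struct mempolicy *src)
{
	struct mempolicy *copy;

	copy = mpol_dup(src);	/* new object, refcnt initialized to 1 */
	if (IS_ERR(copy))
		return PTR_ERR(copy);

	mpol_get(copy);		/* a second user takes its own reference */
	mpol_put(copy);		/* ... and drops it when done */

	mpol_put(copy);		/* last reference gone: object is freed */
	return 0;
}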

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
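
/*
 * Example (not part of the original header): because the shared policy
 * tree is indexed in pages, callers convert a byte offset to a page
 * index before the lookup.  For shared policies the lookup returns a
 * counted reference, so it is paired with mpol_cond_put().  The
 * function name and offset parameter are illustrative.
 */
static inline bool sp_is_interleave_example(struct shared_policy *sp,
					    loff_t offset)
{
	struct mempolicy *pol;
	bool ret;

	pol = mpol_shared_policy_lookup(sp, offset >> PAGE_SHIFT);
	ret = pol && pol->mode == MPOL_INTERLEAVE;
	mpol_cond_put(pol);	/* drop the conditional reference */
	return ret;
}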

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif
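
/*
 * Example (not part of the original header): mpol_parse_str() is the
 * parser behind tmpfs "mpol=" mount options.  A minimal sketch; the
 * option string shown is an illustrative assumption, and the parser
 * may modify the buffer in place.
 */
#ifdef CONFIG_TMPFS
static inline struct mempolicy *mpol_parse_example(void)
{
	char buf[] = "interleave:0-3";
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(buf, &mpol))
		return NULL;	/* non-zero return indicates a parse error */
	return mpol;		/* caller eventually does mpol_put(mpol) */
}
#endif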

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return false;
	return true;
}
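
/*
 * Example (not part of the original header): callers typically filter
 * VMAs with vma_migratable() before attempting migration.  A minimal
 * sketch walking the linked VMA list of this kernel era; assumes the
 * caller holds mmap_sem for read.  The function name is illustrative.
 */
static inline unsigned long count_migratable_vmas_example(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma_migratable(vma))
			n++;
	return n;
}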

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif