linux/include/linux/sched/mm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
        /*
         * The full memory barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}
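
/*
 * Usage sketch (illustrative only, not taken from an in-tree caller): a
 * hypothetical context that keeps the mm_struct itself alive across an
 * unbounded asynchronous operation without pinning the address space:
 *
 *      struct my_async_ctx {
 *              struct mm_struct *mm;
 *      };
 *
 *      static void my_async_ctx_init(struct my_async_ctx *ctx)
 *      {
 *              ctx->mm = current->mm;
 *              mmgrab(ctx->mm);
 *      }
 *
 *      static void my_async_ctx_fini(struct my_async_ctx *ctx)
 *      {
 *              mmdrop(ctx->mm);
 *      }
 *
 * mmgrab() only pins the mm_struct itself (mm_count); touching the address
 * space later still requires a successful mmget_not_zero() paired with
 * mmput().
 */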

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}
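
/*
 * Usage sketch (illustrative only): temporarily pinning the address space of
 * an mm that is already kept alive with mmgrab(), for example the
 * hypothetical ctx from the sketch above:
 *
 *      if (mmget_not_zero(ctx->mm)) {
 *              ... access ctx->mm's address space ...
 *              mmput(ctx->mm);
 *      }
 *
 * A failed mmget_not_zero() means mm_users already dropped to zero and the
 * address space is being, or has been, torn down.
 */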

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but defers the slow path to an asynchronous context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
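/*
 * Usage sketch (illustrative only, the surrounding code is hypothetical):
 * the usual pattern for looking at another task's address space:
 *
 *      struct mm_struct *mm = get_task_mm(task);
 *
 *      if (!mm)
 *              return -ESRCH;
 *      ... inspect mm under the appropriate locks ...
 *      mmput(mm);
 *
 * get_task_mm() takes an mm_users reference and must therefore always be
 * paired with mmput(). mm_access() follows the same pattern but additionally
 * performs the ptrace_may_access() check and may return an ERR_PTR() value
 * when that check fails.
 */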
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * We need RCU to access ->real_parent if CLONE_VM was used along with
         * CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be a false negative. But we do not care, if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done &&
                        rcu_dereference(tsk->real_parent)->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        unsigned int pflags = READ_ONCE(current->flags);

        if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it takes precedence.
                 */
                if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;

                if (pflags & PF_MEMALLOC_PIN)
                        flags &= ~__GFP_MOVABLE;
        }
        return flags;
}
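
/*
 * Illustrative example (hypothetical caller): inside a memalloc_nofs_save()
 * scope, a plain GFP_KERNEL request is transparently narrowed before it
 * reaches reclaim:
 *
 *      unsigned int nofs_flags = memalloc_nofs_save();
 *      gfp_t effective = current_gfp_context(GFP_KERNEL);
 *
 *      ... effective is GFP_KERNEL with __GFP_FS cleared ...
 *
 *      memalloc_nofs_restore(nofs_flags);
 *
 * The allocator and reclaim paths apply this mask internally, so callers of
 * the scope APIs keep passing their usual gfp flags unchanged.
 */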

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows
 * blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
        fs_reclaim_acquire(gfp_mask);
        fs_reclaim_release(gfp_mask);

        might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
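
/*
 * Illustrative example (all my_* names are hypothetical): a helper that only
 * allocates on a cache miss can still announce the dependency up front so
 * that lockdep sees it on every call, not just on the miss path:
 *
 *      static struct my_obj *my_lookup_or_alloc(struct my_cache *c, gfp_t gfp)
 *      {
 *              struct my_obj *obj;
 *
 *              might_alloc(gfp);
 *              obj = my_cache_lookup(c);
 *              if (!obj)
 *                      obj = kmalloc(sizeof(*obj), gfp);
 *              return obj;
 *      }
 */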

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
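
/*
 * Usage sketch (hypothetical block-device resume path): everything called
 * between save and restore, including nested helpers, allocates as if
 * GFP_NOIO had been passed explicitly:
 *
 *      static int my_dev_resume(struct my_dev *dev)
 *      {
 *              unsigned int noio_flags = memalloc_noio_save();
 *              int ret;
 *
 *              ret = my_dev_reinit(dev);
 *              memalloc_noio_restore(noio_flags);
 *              return ret;
 *      }
 *
 * This prevents reclaim triggered by those allocations from recursing back
 * into the device that is still being resumed.
 */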

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
        current->flags |= PF_MEMALLOC_NOFS;
        return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
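
/*
 * Usage sketch (hypothetical filesystem transaction): while the transaction
 * is open, an allocation that recursed into filesystem reclaim could
 * deadlock, so the whole section runs in a NOFS scope:
 *
 *      static int my_fs_do_transaction(struct my_fs_info *fsi)
 *      {
 *              unsigned int nofs_flags = memalloc_nofs_save();
 *              int ret;
 *
 *              ret = my_fs_commit(fsi);
 *              memalloc_nofs_restore(nofs_flags);
 *              return ret;
 *      }
 *
 * Nesting is safe because restore only writes back the bits returned by the
 * pairing save call.
 */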

/*
 * Marks an implicit PF_MEMALLOC scope: allocations made within the scope are
 * treated as coming from the reclaim path itself, so they will not recurse
 * into direct reclaim and may dip into memory reserves. Pair with
 * memalloc_noreclaim_restore() using the returned flags.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

/*
 * Marks an implicit PF_MEMALLOC_PIN scope: allocations made within the scope
 * drop __GFP_MOVABLE (see current_gfp_context()), so the resulting pages are
 * suitable for long-term pinning. Pair with memalloc_pin_restore() using the
 * returned flags.
 */
static inline unsigned int memalloc_pin_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_PIN;

        current->flags |= PF_MEMALLOC_PIN;
        return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (in_interrupt()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;
        }

        return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        return NULL;
}
#endif
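
/*
 * Usage sketch (illustrative only, target_memcg is a hypothetical reference
 * held by the caller): charging an allocation to another cgroup, e.g. from a
 * kernel thread doing work on its behalf:
 *
 *      struct mem_cgroup *old_memcg = set_active_memcg(target_memcg);
 *      void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
 *
 *      set_active_memcg(old_memcg);
 *
 * Only __GFP_ACCOUNT allocations issued inside the scope are redirected, and
 * restoring the saved value is what makes nesting work.
 */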

#ifdef CONFIG_MEMBARRIER
enum {
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY                = (1U << 0),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED                      = (1U << 1),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY                 = (1U << 2),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED                       = (1U << 3),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY      = (1U << 4),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE            = (1U << 5),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY           = (1U << 6),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ                 = (1U << 7),
};

enum {
        MEMBARRIER_FLAG_SYNC_CORE       = (1U << 0),
        MEMBARRIER_FLAG_RSEQ            = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
        if (current->mm != mm)
                return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
        sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */