linux/include/linux/sched/mm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
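
/*
 * Illustrative sketch (not part of this header): keeping the mm_struct
 * itself alive across a sleep without pinning the address space. The
 * task_struct pointer "tsk" is a hypothetical caller-supplied value.
 *
 *	struct mm_struct *mm = tsk->mm;
 *
 *	mmgrab(mm);		// mm_count reference: the struct won't be freed
 *	...			// the address space may still be torn down here
 *	if (mmget_not_zero(mm)) {
 *		// mm_users reference: the address space is usable now
 *		mmput(mm);
 *	}
 *	mmdrop(mm);		// release the mm_count reference
 */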

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as the above, but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif
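
/*
 * Illustrative sketch (not part of this header): safely accessing an
 * address space reached via an mm_count-pinned pointer. "mm" is assumed
 * to be pinned by an earlier mmgrab().
 *
 *	if (mmget_not_zero(mm)) {
 *		// mm_users held: the address space won't be destroyed,
 *		// so VMAs and page tables can be examined (with the
 *		// usual mmap locking on top)
 *		...
 *		mmput(mm);	// may be the final put, freeing the mappings
 *	}
 */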

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
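
/*
 * Illustrative sketch (not part of this header): the canonical
 * get_task_mm()/mmput() pattern; "task" is a hypothetical pinned
 * task_struct pointer.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...		// mm_users held; the address space is stable
 *		mmput(mm);
 *	}
 */
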
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
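
/*
 * Illustrative sketch (not part of this header): an OOM-style heuristic
 * skipping a vfork() child; "p" is a hypothetical candidate task.
 *
 *	if (in_vfork(p)) {
 *		// p borrows its parent's mm until it execs or exits, so
 *		// its memory usage cannot be attributed to p itself
 *		continue;
 *	}
 */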

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
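
/*
 * Illustrative sketch (not part of this header): an allocation path
 * honouring the task's scoped gfp constraints.
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *
 *	// Under memalloc_noio_save(), gfp now lacks __GFP_IO and __GFP_FS;
 *	// under memalloc_nofs_save(), it lacks only __GFP_FS.
 *	struct page *page = alloc_page(gfp);
 */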

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
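
/*
 * Illustrative sketch (not part of this header): a block-layer path that
 * must not recurse into I/O while allocating.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	// Every allocation here behaves as if __GFP_IO were cleared,
 *	// even in callees that pass GFP_KERNEL.
 *	buf = kmalloc(size, GFP_KERNEL);
 *	...
 *	memalloc_noio_restore(noio_flags);
 */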

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
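
/*
 * Illustrative sketch (not part of this header): a filesystem holding a
 * transaction open; scopes nest safely because the previous flag state
 * is saved and restored.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	// Allocations here cannot re-enter the filesystem via reclaim.
 *	item = kmalloc(sizeof(*item), GFP_KERNEL);
 *	...
 *	memalloc_nofs_restore(nofs_flags);
 */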

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
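
/*
 * Illustrative sketch (not part of this header): PF_MEMALLOC scoping for
 * code that must make forward progress without entering direct reclaim,
 * e.g. because it is running on behalf of reclaim itself.
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *
 *	// Allocations may dip into memory reserves and never reclaim.
 *	skb = alloc_skb(len, GFP_ATOMIC);
 *	...
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */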

#ifdef CONFIG_MEMCG
/**
 * memalloc_use_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function is not nesting safe.
 */
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
	WARN_ON_ONCE(current->active_memcg);
	current->active_memcg = memcg;
}

/**
 * memalloc_unuse_memcg - Ends the remote memcg charging scope.
 *
 * This function marks the end of the remote memcg charging scope started by
 * memalloc_use_memcg().
 */
static inline void memalloc_unuse_memcg(void)
{
	current->active_memcg = NULL;
}
#else
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
}

static inline void memalloc_unuse_memcg(void)
{
}
#endif
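
/*
 * Illustrative sketch (not part of this header): charging kernel-side
 * allocations to a remote cgroup; "memcg" is a hypothetical mem_cgroup
 * pointer already pinned by the caller.
 *
 *	memalloc_use_memcg(memcg);
 *
 *	// __GFP_ACCOUNT allocations are charged to memcg, not to the
 *	// current task's own cgroup.
 *	obj = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	...
 *	memalloc_unuse_memcg();
 */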

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */