linux/include/linux/sched/mm.h
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}
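
/*
 * Illustrative sketch (not part of the upstream header): pinning an mm
 * beyond the owner's lifetime. mmgrab() keeps the struct mm_struct itself
 * alive (mm_count); the address space still has to be re-validated with
 * mmget_not_zero() before it is touched. The names my_deferred_work() and
 * my_walk_address_space() are hypothetical.
 *
 *	static void my_deferred_work(struct mm_struct *mm)
 *	{
 *		// mm_count was taken with mmgrab() when the work was queued
 *		if (mmget_not_zero(mm)) {
 *			my_walk_address_space(mm);	// address space is live here
 *			mmput(mm);
 *		}
 *		mmdrop(mm);	// release the mm_count reference
 *	}
 */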

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}

/*
 * Workqueue callback for mmdrop_async(): performs the actual __mmdrop()
 * once the deferred work runs in process context.
 */
static inline void mmdrop_async_fn(struct work_struct *work)
{
        struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
        __mmdrop(mm);
}

/*
 * Like mmdrop(), but defers the final __mmdrop() to a workqueue so the
 * last mm_count reference can be dropped from contexts that must not
 * perform the teardown directly.
 */
static inline void mmdrop_async(struct mm_struct *mm)
{
        if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
                INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
                schedule_work(&mm->async_put_work);
        }
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

/* Pin the address space only if it is still live (mm_users > 0). */
static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path from async context.
 * Can also be called from atomic context.
 */
extern void mmput_async(struct mm_struct *);
#endif
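
/*
 * Illustrative sketch (not part of the upstream header): the common
 * mmget_not_zero()/mmput() pattern for short-term, bounded access to
 * another task's address space. my_access_mm() is a hypothetical caller.
 *
 *	static int my_access_mm(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return -ESRCH;	// address space already torn down
 *		// ... operate on the address space for a bounded time ...
 *		mmput(mm);
 *		return 0;
 *	}
 */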

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
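
/*
 * Illustrative sketch (not part of the upstream header): get_task_mm()
 * returns the task's mm with mm_users elevated, or NULL for kernel
 * threads and tasks whose mm is already gone, so the caller pairs it
 * with mmput(). my_inspect_task() is a hypothetical caller.
 *
 *	static void my_inspect_task(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return;		// kernel thread or mm already gone
 *		// ... look at the task's address space ...
 *		mmput(mm);
 *	}
 */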

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * We need RCU to access ->real_parent if CLONE_VM was used along
         * with CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be a false negative. But we do not care: if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        /*
         * NOIO implies both NOIO and NOFS and it is a weaker context
         * so always make sure it takes precedence.
         */
        if (unlikely(current->flags & PF_MEMALLOC_NOIO))
                flags &= ~(__GFP_IO | __GFP_FS);
        else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
                flags &= ~__GFP_FS;
        return flags;
}
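
/*
 * Illustrative sketch (not part of the upstream header): what
 * current_gfp_context() computes for a caller running inside a
 * PF_MEMALLOC_NOIO or PF_MEMALLOC_NOFS scope. my_show_effective_gfp()
 * is a hypothetical helper.
 *
 *	static gfp_t my_show_effective_gfp(void)
 *	{
 *		// GFP_KERNEL is narrowed to GFP_NOIO under PF_MEMALLOC_NOIO,
 *		// and to GFP_NOFS under PF_MEMALLOC_NOFS.
 *		return current_gfp_context(GFP_KERNEL);
 *	}
 */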

/*
 * Enter an implicit GFP_NOIO scope for the current task: subsequent
 * allocations behave as if __GFP_IO (and __GFP_FS) were cleared. The
 * returned value must be passed to memalloc_noio_restore().
 */
static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}

/* Leave the GFP_NOIO scope entered by the matching memalloc_noio_save(). */
static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
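
/*
 * Illustrative sketch (not part of the upstream header): the save/restore
 * pattern for a section that must not recurse into the I/O layer, e.g. a
 * block driver allocating memory while servicing writeback. The names
 * my_noio_section() and my_do_allocations() are hypothetical.
 *
 *	static void my_noio_section(void)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *
 *		// Allocations here are implicitly GFP_NOIO, even if callees
 *		// pass GFP_KERNEL.
 *		my_do_allocations();
 *
 *		memalloc_noio_restore(noio_flags);
 *	}
 */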

/*
 * Enter an implicit GFP_NOFS scope for the current task: subsequent
 * allocations behave as if __GFP_FS were cleared. The returned value
 * must be passed to memalloc_nofs_restore().
 */
static inline unsigned int memalloc_nofs_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
        current->flags |= PF_MEMALLOC_NOFS;
        return flags;
}

/* Leave the GFP_NOFS scope entered by the matching memalloc_nofs_save(). */
static inline void memalloc_nofs_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
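
/*
 * Illustrative sketch (not part of the upstream header): a filesystem
 * marking a transaction-critical section so that reclaim triggered by its
 * allocations cannot re-enter the filesystem and deadlock. The structure
 * and helpers named here are hypothetical.
 *
 *	static void my_fs_commit(struct my_fs_transaction *trans)
 *	{
 *		unsigned int nofs_flags = memalloc_nofs_save();
 *
 *		my_fs_write_metadata(trans);	// allocations are GFP_NOFS here
 *
 *		memalloc_nofs_restore(nofs_flags);
 *	}
 */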

/*
 * Mark the current task as allocating from within the reclaim path
 * (PF_MEMALLOC): its allocations will not recurse into direct reclaim
 * and may access memory reserves. The returned value must be passed to
 * memalloc_noreclaim_restore().
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        return flags;
}

/* Leave the PF_MEMALLOC scope entered by the matching memalloc_noreclaim_save(). */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
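
/*
 * Illustrative sketch (not part of the upstream header): a path that is
 * itself part of memory reclaim (for example a swap-out callback in a
 * driver) preventing reclaim recursion around a small allocation.
 * my_reclaim_callback() and my_do_small_allocation() are hypothetical.
 *
 *	static int my_reclaim_callback(void)
 *	{
 *		unsigned int flags = memalloc_noreclaim_save();
 *		int ret;
 *
 *		ret = my_do_small_allocation();	// must not recurse into reclaim
 *
 *		memalloc_noreclaim_restore(flags);
 *		return ret;
 *	}
 */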

#endif /* _LINUX_SCHED_MM_H */