1#ifndef _LINUX_SCHED_MM_H
2#define _LINUX_SCHED_MM_H
3
4#include <linux/kernel.h>
5#include <linux/atomic.h>
6#include <linux/sched.h>
7#include <linux/mm_types.h>
8#include <linux/gfp.h>
9
10
11
12
13extern struct mm_struct * mm_alloc(void);
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
/*
 * mmgrab() - take a reference on a struct mm_struct by bumping ->mm_count.
 *
 * Keeps the mm_struct object itself alive (it is freed by __mmdrop() only
 * when ->mm_count reaches zero, see mmdrop() below).  This does NOT pin
 * the address space contents — that is what ->mm_users / mmget() are for.
 *
 * Pair every mmgrab() with a matching mmdrop().
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
36
37
/* Out-of-line destructor: actually frees the mm_struct. */
extern void __mmdrop(struct mm_struct *);

/*
 * mmdrop() - release a reference taken with mmgrab().
 *
 * Drops one ->mm_count reference; when the count hits zero the mm_struct
 * is freed via __mmdrop().  The final-drop branch is annotated unlikely()
 * since most drops do not release the last reference.
 */
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
44
/*
 * Workqueue callback used by mmdrop_async(): recover the mm_struct from
 * its embedded ->async_put_work and free it via __mmdrop().
 */
static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}
50
/*
 * mmdrop_async() - like mmdrop(), but if this is the final reference the
 * actual __mmdrop() is deferred to a workqueue instead of running in the
 * caller's context.  Presumably for callers that cannot invoke __mmdrop()
 * directly (e.g. atomic context) — confirm against call sites.
 */
static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/*
 * mmget() - pin the address space by bumping ->mm_users.
 *
 * Pair with mmput().  Note the distinction from mmgrab(): ->mm_users
 * guards the address space, ->mm_count guards the mm_struct object.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}
79
/*
 * mmget_not_zero() - take a ->mm_users reference only if the count is
 * currently non-zero (i.e. the address space has not already started
 * tearing down).  Returns true on success, false otherwise.
 */
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
84
85
86extern void mmput(struct mm_struct *);
87#ifdef CONFIG_MMU
88
89
90
91extern void mmput_async(struct mm_struct *);
92#endif
93
94
95extern struct mm_struct *get_task_mm(struct task_struct *task);
96
97
98
99
100
101extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
102
103extern void mm_release(struct task_struct *, struct mm_struct *);
104
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
/* mm ownership tracking only exists under CONFIG_MEMCG — no-op stub. */
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif
112
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
/* Without an MMU there is no mmap layout to choose — no-op stub. */
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
125
/*
 * in_vfork() - heuristic test for "tsk is currently blocked in vfork()":
 * it has a vfork completion pending (->vfork_done set) and shares its mm
 * with its parent.
 */
static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * NOTE(review): ->real_parent is dereferenced under
	 * rcu_read_lock(), presumably because the parent can exit
	 * concurrently and RCU keeps its task_struct accessible while we
	 * peek at ->mm — confirm against the locking rules for
	 * ->real_parent.
	 *
	 * The ->mm comparison can be a false negative for exotic clone
	 * flag combinations (->vfork_done may be set while the mm is not
	 * shared with ->real_parent); callers appear to tolerate that —
	 * TODO confirm.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
151
152
153
154
155
156
157static inline gfp_t current_gfp_context(gfp_t flags)
158{
159
160
161
162
163 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
164 flags &= ~(__GFP_IO | __GFP_FS);
165 else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
166 flags &= ~__GFP_FS;
167 return flags;
168}
169
170static inline unsigned int memalloc_noio_save(void)
171{
172 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
173 current->flags |= PF_MEMALLOC_NOIO;
174 return flags;
175}
176
177static inline void memalloc_noio_restore(unsigned int flags)
178{
179 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
180}
181
182static inline unsigned int memalloc_nofs_save(void)
183{
184 unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
185 current->flags |= PF_MEMALLOC_NOFS;
186 return flags;
187}
188
189static inline void memalloc_nofs_restore(unsigned int flags)
190{
191 current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
192}
193
194static inline unsigned int memalloc_noreclaim_save(void)
195{
196 unsigned int flags = current->flags & PF_MEMALLOC;
197 current->flags |= PF_MEMALLOC;
198 return flags;
199}
200
201static inline void memalloc_noreclaim_restore(unsigned int flags)
202{
203 current->flags = (current->flags & ~PF_MEMALLOC) | flags;
204}
205
206#endif
207