#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

extern struct mm_struct *mm_alloc(void);
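
/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used
 * before accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time. Use mmdrop() to release the reference acquired by mmgrab().
 */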
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
        /*
         * The implicit full barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}
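
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin the address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 */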
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}
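
/*
 * Illustrative note (not from the original header): mmget_not_zero() is the
 * usual way to pin an address space that may already be on its way out,
 * e.g. an mm looked up from another task. A sketch of the pattern:
 *
 *	if (mmget_not_zero(mm)) {
 *		... operate on mm's address space ...
 *		mmput(mm);
 *	}
 *
 * If mm_users has already dropped to zero, mmget_not_zero() fails and the
 * address space must not be touched.
 */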

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput() but performs the slow path asynchronously; can be
 * called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                               unsigned long len, unsigned long pgoff,
                               unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * We need RCU to access ->real_parent if CLONE_VM was used along
         * with CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be false negative. But we do not care, if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done &&
                        rcu_dereference(tsk->real_parent)->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        unsigned int pflags = READ_ONCE(current->flags);

        if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it takes precedence.
                 */
                if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;

                if (pflags & PF_MEMALLOC_PIN)
                        flags &= ~__GFP_MOVABLE;
        }
        return flags;
}

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
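
/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. It lets lockdep track
 * reclaim dependencies and warns about sleeping allocations from atomic
 * context even when no allocation actually happens on a given call.
 */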
static inline void might_alloc(gfp_t gfp_mask)
{
        fs_reclaim_acquire(gfp_mask);
        fs_reclaim_release(gfp_mask);

        might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
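
/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */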
static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}
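
/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_noio_save() call.
 */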
static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
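
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */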
static inline unsigned int memalloc_nofs_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
        current->flags |= PF_MEMALLOC_NOFS;
        return flags;
}
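
/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_nofs_save() call.
 */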
static inline void memalloc_nofs_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
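
/*
 * Illustrative note (not from the original header): the save/restore pairs
 * above delimit a scope rather than flipping a global switch, so nesting
 * works. A sketch of the typical filesystem pattern:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	... allocations here implicitly lose __GFP_FS ...
 *	memalloc_nofs_restore(nofs_flags);
 *
 * The value returned by the save function must be passed back to the
 * matching restore call; it preserves any scope that was already active.
 */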

static inline unsigned int memalloc_noreclaim_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

static inline unsigned int memalloc_pin_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_PIN;

        current->flags |= PF_MEMALLOC_PIN;
        return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);

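/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * the __GFP_ACCOUNT allocations till the end of the scope will be charged to
 * the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */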
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (in_interrupt()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;
        }

        return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        return NULL;
}
#endif
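
/*
 * Illustrative note (not from the original header): because
 * set_active_memcg() nests, callers stash the previous memcg and put it
 * back when done:
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *	... __GFP_ACCOUNT allocations are charged to memcg ...
 *	set_active_memcg(old_memcg);
 */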

#ifdef CONFIG_MEMBARRIER
enum {
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ		= (1U << 7),
};

enum {
        MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
        MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

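/*
 * Illustrative note (not from the original header): when @mm is the current
 * task's mm and has registered private expedited sync-core membarrier,
 * serialize the CPU's instruction stream via sync_core_before_usermode()
 * before the task returns to user-space.
 */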
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
        if (current->mm != mm)
                return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
        sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif /* CONFIG_MEMBARRIER */

#endif /* _LINUX_SCHED_MM_H */