/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
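/*
 * Routines for handling mm_structs.
 *
 * mm_alloc() returns a new, initialised mm_struct with a single mm_users
 * reference; drop it with mmput().
 */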
extern struct mm_struct *mm_alloc(void);
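/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 */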
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
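	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */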
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

#ifdef CONFIG_PREEMPT_RT
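/*
 * RCU callback used by mmdrop_sched(): resolve the enclosing mm_struct from
 * its delayed_drop rcu_head and perform the final __mmdrop().
 */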
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}
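/*
 * Invoked from finish_task_switch(). On PREEMPT_RT the final __mmdrop() is
 * deferred via call_rcu() rather than done directly from the scheduler path.
 */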
static inline void mmdrop_sched(struct mm_struct *mm)
{
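	/* Provides a full memory barrier. See mmdrop(). */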
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif
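/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 */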
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

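/* Like mmget(), but fails if mm_users has already dropped to zero. */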
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
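/* mmput() drops an mm_users reference; the last one tears down the mappings. */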
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
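/*
 * Same as mmput(), but performs the final teardown from a workqueue, so it
 * can also be called from atomic context.
 */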
void mmput_async(struct mm_struct *);
#endif
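/* Grab a reference to a task's mm, if it is not already going away. */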
extern struct mm_struct *get_task_mm(struct task_struct *task);
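/*
 * Like get_task_mm(), but only succeeds if ptrace_may_access() with the
 * given mode grants access to the task.
 */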
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

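/* Remove the current task's stale references to the old mm_struct on exit(). */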
extern void exit_mm_release(struct task_struct *, struct mm_struct *);

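/* Remove the current task's stale references to the old mm_struct on exec(). */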
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

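/* Re-elect mm->owner when the owning task exits (only needed for memcg). */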
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

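	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */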
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
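/*
 * Applies per-task gfp context to the given allocation flags:
 * PF_MEMALLOC_NOIO implies GFP_NOIO,
 * PF_MEMALLOC_NOFS implies GFP_NOFS,
 * PF_MEMALLOC_PIN  implies !__GFP_MOVABLE.
 */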
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
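		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */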
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
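/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. It exercises the
 * fs_reclaim lockdep tracking and the might_sleep() check even when no
 * allocation actually happens.
 */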
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
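/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */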
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}
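/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_noio_save call.
 */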
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
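/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */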
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}
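/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_nofs_save call.
 */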
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

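/*
 * Enter an implicit PF_MEMALLOC scope: allocations may dip into memory
 * reserves and will not recurse into direct reclaim. Pair with
 * memalloc_noreclaim_restore() using the returned flags.
 */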
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

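/*
 * Enter an implicit PF_MEMALLOC_PIN scope: current_gfp_context() strips
 * __GFP_MOVABLE, so pages allocated here are suitable for long-term pinning.
 * Pair with memalloc_pin_restore() using the returned flags.
 */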
static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
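/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * the __GFP_ACCOUNT allocations till the end of the scope will be charged to
 * the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */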
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

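/*
 * Issue a core-serializing instruction before returning to user-space if the
 * current mm has private-expedited sync-core membarrier enabled.
 */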
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */