/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
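
/*
 * Illustrative sketch, not part of the kernel API surface: a typical
 * mmgrab()/mmdrop() pairing for a caller that must keep the mm_struct
 * itself alive across a sleep without pinning the address space. The
 * names my_keep_mm and do_something_slow are hypothetical.
 *
 *	static void my_keep_mm(struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		// mm_struct itself won't be freed ...
 *		do_something_slow();	// ... even if all users exit here
 *		mmdrop(mm);		// release the mm_count reference
 *	}
 */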

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
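
/*
 * Illustrative sketch (hypothetical caller, not from the kernel source):
 * safely accessing an address space that may be concurrently tearing
 * down. mmget_not_zero() only succeeds while mm_users is still non-zero;
 * on success the reference must be dropped with mmput().
 *
 *	static void my_inspect(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return;		// address space already gone
 *		// ... walk or query the address space ...
 *		mmput(mm);	// pairs with the successful mmget_not_zero()
 *	}
 */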

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context; can
 * be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away. */
extern struct mm_struct *get_task_mm(struct task_struct *task);
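
/*
 * Illustrative sketch (my_task_probe is a hypothetical caller): the
 * canonical get_task_mm() pattern. A NULL return means the task has no
 * user address space (a kernel thread) or the mm is already going away.
 *
 *	static void my_task_probe(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return;
 *		// ... use mm ...
 *		mmput(mm);	// get_task_mm() took an mm_users reference
 *	}
 */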
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the passed mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct. */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
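
/*
 * Worked example, per the masking above: with PF_MEMALLOC_NOIO set on the
 * current task, current_gfp_context(GFP_KERNEL) strips __GFP_IO and
 * __GFP_FS, i.e. the allocation is effectively demoted to GFP_NOIO; with
 * only PF_MEMALLOC_NOFS set, GFP_KERNEL is demoted to GFP_NOFS.
 */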

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Mark the beginning of an implicit GFP_NOIO allocation scope. All further
 * allocations in this task implicitly drop __GFP_IO and __GFP_FS (see
 * current_gfp_context() above), so they cannot recurse into the IO path.
 * Pair with memalloc_noio_restore(), passing it the returned flags.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/*
 * End the implicit GFP_NOIO scope; @flags must be the value returned by
 * the pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
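
/*
 * Illustrative sketch (my_io_path is a hypothetical caller): the
 * save/restore pair brackets a scope in which every allocation, including
 * ones made by callees that pass plain GFP_KERNEL, behaves as GFP_NOIO.
 * The memalloc_nofs_*() pair below follows the same pattern for GFP_NOFS.
 *
 *	static void my_io_path(void)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *
 *		// ... allocations here cannot recurse into the IO path ...
 *		memalloc_noio_restore(noio_flags); // must pass the saved value
 *	}
 */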

/*
 * Mark the beginning of an implicit GFP_NOFS allocation scope. All further
 * allocations in this task implicitly drop __GFP_FS, so they cannot
 * recurse into filesystem reclaim. Pair with memalloc_nofs_restore(),
 * passing it the returned flags.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/*
 * End the implicit GFP_NOFS scope; @flags must be the value returned by
 * the pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

/*
 * Mark the beginning of a PF_MEMALLOC scope: allocations may dip into
 * memory reserves and will not themselves enter direct reclaim. Pair with
 * memalloc_noreclaim_restore(), passing it the returned flags.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

/*
 * End the PF_MEMALLOC scope; @flags must be the value returned by the
 * pairing memalloc_noreclaim_save() call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMBARRIER */

#endif /* _LINUX_SCHED_MM_H */