1#ifndef _LINUX_RMAP_H
2#define _LINUX_RMAP_H
3
4
5
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/spinlock.h>
11#include <linux/memcontrol.h>
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/*
 * Anchor object for anonymous-memory reverse mapping: an anonymous
 * page's ->mapping points at an anon_vma (see page_anon_vma() below),
 * and the anon_vma leads back to the vmas that may map the page.
 *
 * NOTE(review): high-level semantics inferred from the helpers in this
 * header; confirm against mm/rmap.c.
 */
struct anon_vma {
	struct anon_vma *root;	/* root of this anon_vma's group; every
				 * lock/unlock helper in this header takes
				 * root->lock, never this->lock directly */
	spinlock_t lock;	/* only meaningful on the root anon_vma,
				 * given the locking pattern above */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
	/*
	 * References held by external users (KSM and/or page migration,
	 * per the config guard).  Zeroed by
	 * anonvma_external_refcount_init(), raised by get_anon_vma(),
	 * released through drop_anon_vma().
	 */
	atomic_t external_refcount;
#endif
	struct list_head head;	/* presumably heads the same_anon_vma list
				 * of anon_vma_chain links — TODO confirm */
};
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/*
 * Link node tying one vma to one anon_vma.  A vma may be linked to
 * several anon_vmas and vice versa, so the many-to-many relation is
 * represented by these chain objects.
 *
 * NOTE(review): which list each member threads into is inferred from
 * the field names; verify against anon_vma_clone()/anon_vma_fork().
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;	/* the vma side of the link */
	struct anon_vma *anon_vma;	/* the anon_vma side of the link */
	struct list_head same_vma;	/* presumably chains all links of one vma */
	struct list_head same_anon_vma;	/* presumably threads anon_vma->head */
};
72
73#ifdef CONFIG_MMU
74#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
75static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
76{
77 atomic_set(&anon_vma->external_refcount, 0);
78}
79
80static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
81{
82 return atomic_read(&anon_vma->external_refcount);
83}
84
85static inline void get_anon_vma(struct anon_vma *anon_vma)
86{
87 atomic_inc(&anon_vma->external_refcount);
88}
89
/* Out-of-line release counterpart of get_anon_vma() — presumably frees
 * on the last external reference; confirm against its definition. */
void drop_anon_vma(struct anon_vma *);
91#else
/*
 * Without KSM or page migration there are no external anon_vma users,
 * so the refcount helpers collapse to no-ops.
 */
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
}

/* Always zero: no external users exist in this configuration. */
static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	return 0;
}

/* No-op: external references are not counted in this configuration. */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
}

/* No-op counterpart of get_anon_vma(). */
static inline void drop_anon_vma(struct anon_vma *anon_vma)
{
}
108#endif
109
110static inline struct anon_vma *page_anon_vma(struct page *page)
111{
112 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
113 PAGE_MAPPING_ANON)
114 return NULL;
115 return page_rmapping(page);
116}
117
118static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
119{
120 struct anon_vma *anon_vma = vma->anon_vma;
121 if (anon_vma)
122 spin_lock(&anon_vma->root->lock);
123}
124
125static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
126{
127 struct anon_vma *anon_vma = vma->anon_vma;
128 if (anon_vma)
129 spin_unlock(&anon_vma->root->lock);
130}
131
132static inline void anon_vma_lock(struct anon_vma *anon_vma)
133{
134 spin_lock(&anon_vma->root->lock);
135}
136
137static inline void anon_vma_unlock(struct anon_vma *anon_vma)
138{
139 spin_unlock(&anon_vma->root->lock);
140}
141
142
143
144
/*
 * Out-of-line anon_vma lifecycle operations (presumably defined in
 * mm/rmap.c — TODO confirm).  The int-returning ones look like
 * 0/-errno allocators; verify at the definitions.
 */
void anon_vma_init(void);	/* one-time subsystem initialization */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);
152
/*
 * Drop @next's anon_vma links when it is merged into @vma.  Only valid
 * when both vmas share the same anon_vma, which is asserted.
 */
static inline void anon_vma_merge(struct vm_area_struct *vma,
		struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}
159
160
161
162
/*
 * rmap maintenance hooks called by the core VM when page table entries
 * are installed or torn down (defined out of line).  The unsigned long
 * parameter is presumably the faulting virtual address — TODO confirm.
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

/* Huge-page variants of the anonymous rmap add operations. */
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);
175
176static inline void page_dup_rmap(struct page *page)
177{
178 atomic_inc(&page->_mapcount);
179}
180
181
182
183
/*
 * Reference-bit harvesting used by reclaim (defined out of line).
 * NOTE(review): exact contract lives with the definitions; the no-MMU
 * stub in this header clears *vm_flags and returns 0.
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
188
/*
 * Flags for try_to_unmap(): the low byte selects the action (extract
 * it with TTU_ACTION()), the high bits modify its behavior.
 */
enum ttu_flags {
	TTU_UNMAP = 0,			/* plain unmap */
	TTU_MIGRATION = 1,		/* unmap for migration */
	TTU_MUNLOCK = 2,		/* munlock handling */
	TTU_ACTION_MASK = 0xff,		/* masks one of the actions above */

	TTU_IGNORE_MLOCK = (1 << 8),	/* don't honor mlock — TODO confirm */
	TTU_IGNORE_ACCESS = (1 << 9),	/* skip accessed-bit checks — TODO confirm */
	TTU_IGNORE_HWPOISON = (1 << 10),/* proceed despite hwpoison — TODO confirm */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
200
/* True for vmas serving as a temporary stack (e.g. during exec) —
 * TODO confirm against the definition. */
bool is_vma_temporary_stack(struct vm_area_struct *vma);

/*
 * Remove mappings of @page per @flags; try_to_unmap_one() is the
 * exported per-vma worker.  Presumably returns one of the SWAP_* codes
 * at the end of this header (the no-MMU macro yields SWAP_FAIL).
 */
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);
206
207
208
209
/*
 * Find the pte mapping @page at @address in @mm.  Per the __cond_lock()
 * annotation below, *ptlp is conditionally acquired when a pte is
 * returned — the caller unlocks it.  @sync is forwarded untouched;
 * its meaning lives with the definition — TODO confirm.
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

/*
 * Sparse-annotated wrapper: __cond_lock() tells sparse that *ptlp is
 * conditionally taken by the call it wraps.  It must enclose the call
 * itself — do not reorder or simplify.
 */
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
223
224
225
226
/*
 * Virtual address of @page inside @vma.
 * NOTE(review): the not-mapped return convention is defined out of
 * line — confirm before relying on it.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Clean/write-protect the mappings of @page.  Presumably returns
 * nonzero when any mapping was dirty (the no-MMU stub in this header
 * returns 0) — TODO confirm.
 */
int page_mkclean(struct page *);

/*
 * Name suggests: check a possibly-mlocked page and munlock it if
 * appropriate, returning a SWAP_* code — semantics live with the
 * definition, confirm there.
 */
int try_to_munlock(struct page *);

/* Out-of-line worker behind page_lock_anon_vma() below. */
struct anon_vma *__page_lock_anon_vma(struct page *page);
247
/*
 * Locate and lock the anon_vma for @page via the out-of-line worker.
 * Pair with page_unlock_anon_vma().  May return NULL — TODO confirm
 * the worker's failure behavior.
 */
static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;

	/*
	 * Sparse-only bookkeeping: the first __cond_lock() records that
	 * the worker conditionally enters RCU, the second that the
	 * returned anon_vma's root lock is conditionally held.  Neither
	 * generates code; do not reorder or remove.
	 */
	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

	(void) __cond_lock(&anon_vma->root->lock, anon_vma);

	return anon_vma;
}
259
/* Unlock counterpart of page_lock_anon_vma(). */
void page_unlock_anon_vma(struct anon_vma *anon_vma);

/* Whether @page is mapped into @vma — exact return encoding defined
 * out of line, TODO confirm. */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Call @rmap_one(page, vma, address, arg) for each vma mapping @page;
 * presumably the callback's return value can stop the walk — confirm
 * the stop condition at the definition.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
268
269#else
270
/*
 * No-MMU configuration: the anon_vma machinery is compiled out, so the
 * operations reduce to successful no-ops.
 */
#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)
274
/*
 * No-MMU stub: there are no ptes to harvest, so report no references
 * and clear the flags output.
 */
static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}
282
/* Unmapping is impossible without an MMU: always report failure. */
#define try_to_unmap(page, refs) SWAP_FAIL

/* No-MMU stub: nothing to clean when there are no ptes. */
static inline int page_mkclean(struct page *page)
{
	return 0;
}
289
290
291#endif
292
293
294
295
/*
 * Return codes of try_to_unmap() (and presumably try_to_munlock() —
 * TODO confirm at the definitions).
 */
#define SWAP_SUCCESS	0	/* operation fully succeeded */
#define SWAP_AGAIN	1	/* transient failure; retry — TODO confirm */
#define SWAP_FAIL	2	/* permanent failure (also the no-MMU result) */
#define SWAP_MLOCK	3	/* page is mlocked — TODO confirm */
300
301#endif
302