#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used for making the decision about reusing an
	 * anon_vma instead of forking a new one. See the comments in
	 * anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
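
/*
 * Illustrative sketch of the two lookup directions (not part of this
 * header): a vma reaches its anon_vmas through vma->anon_vma_chain,
 * while an anon_vma reaches its vmas through the interval tree rooted
 * at anon_vma->rb_root. For example:
 *
 *	struct anon_vma_chain *avc;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		... avc->anon_vma is one anon_vma this vma belongs to ...
 */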

enum ttu_flags {
	TTU_UNMAP		= 1,		/* unmap mode */
	TTU_MIGRATION		= 2,		/* migration mode */
	TTU_MUNLOCK		= 4,		/* munlock mode */
	TTU_LZFREE		= 8,		/* lazy free mode */
	TTU_SPLIT_HUGE_PMD	= 16,		/* split huge PMD if any */

	TTU_IGNORE_MLOCK	= (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS	= (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON	= (1 << 10),	/* corrupted page is recoverable */
	TTU_BATCH_FLUSH		= (1 << 11),	/* Batch TLB flushes where possible
						 * and caller guarantees they will
						 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= (1 << 12)	/* do not grab rmap lock:
						 * caller holds it */
};
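
/*
 * Illustrative sketch (hypothetical call site): callers combine one
 * action flag from the low byte with modifier bits, e.g. reclaim may
 * request batched TLB flushing while unmapping:
 *
 *	int ret = try_to_unmap(page, TTU_UNMAP | TTU_BATCH_FLUSH);
 *
 *	if (ret == SWAP_SUCCESS)
 *		... every pte mapping the page has been removed ...
 *	else if (ret == SWAP_MLOCK)
 *		... the page is mlocked; leave it resident ...
 *
 * See the SWAP_* return codes at the bottom of this header.
 */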

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
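
/*
 * Sketch of refcount usage (illustrative, not from this header): a
 * caller that must keep the anon_vma alive beyond the vma's lifetime
 * pins it first and drops the pin when done:
 *
 *	get_anon_vma(anon_vma);
 *	... use anon_vma after dropping the locks that made it stable ...
 *	put_anon_vma(anon_vma);	... may free it via __put_anon_vma() ...
 */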

/*
 * Locking is always taken on the root of the anon_vma tree, so one
 * rwsem serializes a whole tree of forked anon_vmas.
 */
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
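
/*
 * Illustrative read-side pattern: rmap walkers take the root lock
 * shared, so several walkers can scan the same anon_vma tree at once:
 *
 *	anon_vma_lock_read(anon_vma);
 *	... walk the interval tree at anon_vma->rb_root ...
 *	anon_vma_unlock_read(anon_vma);
 */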

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
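
/*
 * Illustrative sketch of the usual call site: fault handlers make sure
 * an anon_vma exists before installing the first anonymous page:
 *
 *	if (anon_vma_prepare(vma))
 *		return VM_FAULT_OOM;	... allocation failed ...
 *	page_add_new_anon_rmap(page, vma, address, false);
 */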

static inline void anon_vma_merge(struct vm_area_struct *vma,
		struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);
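
/*
 * Illustrative pairing (hypothetical call sites): a fault path adds an
 * rmap reference and the zap/unmap path removes it; the bool selects
 * compound (THP) versus small-page mapcount accounting:
 *
 *	page_add_anon_rmap(page, vma, address, false);
 *	...
 *	page_remove_rmap(page, false);
 */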

/* Duplicate an existing pte mapping's rmap reference, e.g. at fork() */
static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

#define TTU_ACTION_MASK	0xff	/* action flags occupy the low byte */
#define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	struct page *page;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

/* Unwind the pte mapping and lock taken by page_vma_mapped_walk() */
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->pte)
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
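
/*
 * Illustrative usage (mirrors callers in mm/; names are hypothetical):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... here pvmw.pte (or pvmw.pmd for a huge mapping) is
 *		    valid and pvmw.ptl is held; inspect the entry ...
 *	}
 *
 * The walk unlocks everything itself when it returns false; call
 * page_vma_mapped_walk_done() only when breaking out of the loop early.
 */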

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
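
/*
 * Sketch (illustrative): pin and read-lock the anon_vma behind a page,
 * then drop both when done:
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (anon_vma) {
 *		... scan the vmas that may map the page ...
 *		page_unlock_anon_vma_read(anon_vma);
 *	}
 */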

/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for getting anon_lock by optimized way rather than default
 * invalid_vma: for skipping uninterested vma
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
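
/*
 * Illustrative walk (my_rmap_one and my_data are hypothetical):
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long addr, void *arg)
 *	{
 *		... examine one mapping of page ...
 *		return SWAP_AGAIN;	... keep walking; SWAP_FAIL aborts ...
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = &my_data,
 *	};
 *	rmap_walk(page, &rwc);
 *
 * Use rmap_walk_locked() when the caller already holds the rmap lock
 * (cf. TTU_RMAP_LOCKED above).
 */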

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
#define SWAP_LZFREE	4

#endif	/* _LINUX_RMAP_H */