#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used for making a decision about reusing an
	 * anon_vma instead of forking a new one. See the comments in
	 * anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;		/* Interval tree of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_MIGRATION		= 0x1,	/* migration mode */
	TTU_MUNLOCK		= 0x2,	/* munlock mode */

	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
	TTU_IGNORE_ACCESS	= 0x10,	/* don't age */
	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= 0x80	/* do not grab rmap lock:
					 * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

/*
 * All anon_vma locking is done on the rwsem of the root of the
 * anon_vma tree; the rwsem of a child anon_vma is never taken directly.
 */
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
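
/*
 * Illustrative sketch (not part of this header): writers take the root
 * rwsem exclusive while changing the interval tree, readers such as
 * rmap walkers take it shared. A hypothetical caller might do:
 *
 *	anon_vma_lock_write(anon_vma);
 *	... insert into or erase from anon_vma->rb_root ...
 *	anon_vma_unlock_write(anon_vma);
 */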

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
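
/*
 * Illustrative sketch (not part of this header): a fault handler is
 * expected to call anon_vma_prepare() before mapping a new anonymous
 * page into the vma, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	...
 *	page_add_new_anon_rmap(page, vma, address, false);
 */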

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);
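
/*
 * Illustrative sketch (not part of this header): page_get_anon_vma()
 * returns the page's anon_vma with its refcount raised, or NULL, so a
 * hypothetical caller pairs it with put_anon_vma():
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		... operate on the anon_vma ...
 *		put_anon_vma(anon_vma);
 *	}
 */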

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
			unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);
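
/*
 * Illustrative sketch (not part of this header): a reclaim path might
 * combine ttu_flags and check the boolean result, roughly:
 *
 *	if (!try_to_unmap(page, TTU_BATCH_FLUSH | TTU_IGNORE_ACCESS))
 *		goto activate_locked;	(hypothetical label)
 */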

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	struct page *page;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->pte)
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
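
/*
 * Illustrative sketch (not part of this header): the usual pattern is
 * to initialize a page_vma_mapped_walk on the stack and iterate; each
 * true return leaves pvmw.pte (or pvmw.pmd) set up and pvmw.ptl held:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... inspect or modify the entry under pvmw.ptl ...
 *	}
 */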

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (which in turn cleans the underlying pages?)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
void try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *	than the default (page_lock_anon_vma_read())
 * invalid_vma: for skipping uninterested vma
 */
struct rmap_walk_control {
	void *arg;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
	 */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			 unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
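
/*
 * Illustrative sketch (not part of this header): a hypothetical walker
 * supplies rmap_one() and lets rmap_walk() call it for every vma in
 * which the page is mapped:
 *
 *	static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *				unsigned long addr, void *arg)
 *	{
 *		... examine the mapping of page at addr in vma ...
 *		return true;	(keep walking; false stops the walk)
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = &my_state,	(hypothetical private state)
 *	};
 *	rmap_walk(page, &rwc);
 */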

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) false

static inline int page_mkclean(struct page *page)
{
	return 0;
}


#endif	/* CONFIG_MMU */

#endif	/* _LINUX_RMAP_H */