1
2#ifndef _LINUX_RMAP_H
3#define _LINUX_RMAP_H
4
5
6
7
8#include <linux/list.h>
9#include <linux/slab.h>
10#include <linux/mm.h>
11#include <linux/rwsem.h>
12#include <linux/memcontrol.h>
13#include <linux/highmem.h>
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
/*
 * Anchor for the reverse mapping of anonymous pages: every anonymous
 * VMA points at one of these, and the interval tree below (rb_root)
 * records the VMAs a page belonging to this anon_vma may be mapped in.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree; all locking goes through root->rwsem */
	struct rw_semaphore rwsem;	/* write: modify the tree, read: walk it (only used on the root) */

	/*
	 * Taken when there is no guarantee the VMAs referencing this
	 * anon_vma stay alive for the duration of the operation; the
	 * last holder tears the structure down via __put_anon_vma().
	 */
	atomic_t refcount;

	/*
	 * NOTE(review): appears to count child anon_vmas (see parent
	 * below) to drive reuse/teardown decisions in anon_vma_clone()/
	 * unlink_anon_vmas() — confirm exact invariant against mm/rmap.c.
	 */
	unsigned degree;

	struct anon_vma *parent;	/* parent anon_vma this one was forked from */

	/*
	 * Interval tree of anon_vma_chain nodes, i.e. of the VMAs this
	 * anon_vma's pages can be mapped into (keyed by rb/rb_subtree_last
	 * in struct anon_vma_chain).
	 */
	struct rb_root_cached rb_root;
};
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * Link object tying one VMA to one anon_vma it can contain pages from
 * (fork can associate a VMA with several anon_vmas, and vice versa).
 * It sits on two structures at once:
 *  - same_vma: list of all chains hanging off the VMA
 *  - rb:       node in the anon_vma's interval tree of VMAs
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;	/* the VMA side of the link */
	struct anon_vma *anon_vma;	/* the anon_vma side of the link */
	struct list_head same_vma;	/* entry in vma's chain list — NOTE(review): presumably protected by mmap_sem; confirm */
	struct rb_node rb;		/* entry in anon_vma->rb_root interval tree (under anon_vma->root->rwsem) */
	unsigned long rb_subtree_last;	/* interval-tree augmentation: max endpoint in this subtree */
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;	/* debug copies to detect stale tree entries */
#endif
};
87
/* Flags controlling try_to_unmap() and friends. */
enum ttu_flags {
	TTU_MIGRATION		= 0x1,	/* unmap for migration: install migration entries */
	TTU_MUNLOCK		= 0x2,	/* munlock mode */

	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* unmap even mlocked pages */
	/* NOTE(review): 0x10 is skipped here — verify against the tree this header came from */
	TTU_IGNORE_HWPOISON	= 0x20,	/* proceed even if the page is hwpoisoned */
	TTU_BATCH_FLUSH		= 0x40,	/* batch TLB flushes instead of flushing per page */

	/* caller already holds the rmap lock; don't take it again */
	TTU_RMAP_LOCKED		= 0x80,

	TTU_SPLIT_FREEZE	= 0x100,	/* freeze PTEs while splitting a THP */
};
102
103#ifdef CONFIG_MMU
/* Take a reference on @anon_vma; pair with put_anon_vma(). */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}
108
109void __put_anon_vma(struct anon_vma *anon_vma);
110
111static inline void put_anon_vma(struct anon_vma *anon_vma)
112{
113 if (atomic_dec_and_test(&anon_vma->refcount))
114 __put_anon_vma(anon_vma);
115}
116
/* Exclusive-lock the anon_vma tree (always through the root's rwsem). */
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}
121
/* Release the exclusive lock taken by anon_vma_lock_write(). */
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}
126
/* Shared-lock the anon_vma tree for walking (rmap lookups). */
static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}
131
/* Release the shared lock taken by anon_vma_lock_read(). */
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
136
137
138
139
140
/* anon_vma lifecycle, implemented in mm/rmap.c */
void anon_vma_init(void);	/* create anon_vma cache at boot */
int __anon_vma_prepare(struct vm_area_struct *);	/* slow path of anon_vma_prepare() below */
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
146
147static inline int anon_vma_prepare(struct vm_area_struct *vma)
148{
149 if (likely(vma->anon_vma))
150 return 0;
151
152 return __anon_vma_prepare(vma);
153}
154
/*
 * @next is being merged into @vma: drop next's anon_vma links.
 * Both VMAs must already share the same anon_vma (asserted below).
 */
static inline void anon_vma_merge(struct vm_area_struct *vma,
		struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}
161
/* Grab a reference to the page's anon_vma, or NULL if it has none. */
struct anon_vma *page_get_anon_vma(struct page *page);

/* bit flags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01	/* page is mapped exclusively by this process */
#define RMAP_COMPOUND 0x02	/* operate on the compound (huge) page */
167
168
169
170
/*
 * rmap interfaces called when adding or removing pte of page;
 * implemented in mm/rmap.c.  The trailing bool selects compound
 * (huge page) handling where present.
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);	/* int: RMAP_* flags above */
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

/* hugetlbfs variants */
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);
185
186static inline void page_dup_rmap(struct page *page, bool compound)
187{
188 atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
189}
190
191
192
193
/* Count accesses to the page via its rmaps; also reports vm_flags seen. */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

/* Unmap the page everywhere per ttu_flags; true if fully unmapped. */
bool try_to_unmap(struct page *, enum ttu_flags flags);
198
199
/* Avoid racing with parallel zapping: wait for the pte lock */
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)

/*
 * State for page_vma_mapped_walk(): caller fills page/vma/flags, the
 * walk fills address/pmd/pte/ptl for each mapping it finds.
 */
struct page_vma_mapped_walk {
	struct page *page;		/* page to look for */
	struct vm_area_struct *vma;	/* VMA to search in */
	unsigned long address;		/* virtual address of the current mapping */
	pmd_t *pmd;			/* PMD entry mapping the page (if any) */
	pte_t *pte;			/* mapped PTE, valid while walking */
	spinlock_t *ptl;		/* page-table lock held for pte/pmd */
	unsigned int flags;		/* PVMW_* above */
};
213
/*
 * Terminate a page_vma_mapped_walk early: unmap the PTE before
 * dropping the page-table lock (order matters — the pte mapping was
 * established under ptl).
 */
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->pte)
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}
221
/* Advance to the next mapping of pvmw->page in pvmw->vma; false when done. */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

/*
 * Used by swapoff to help locate where page is expected in vma.
 * NOTE(review): presumably returns -EFAULT when not mapped — confirm in mm/rmap.c.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/* Clean (write-protect) all shared mappings of the page. */
int page_mkclean(struct page *);

/*
 * Called from the munlock path to check whether other VMAs still hold
 * the page mlocked.
 */
void try_to_munlock(struct page *);

/* Replace migration entries for @old with real PTEs pointing at @new. */
void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Lock/unlock the anon_vma for reading while also holding a refcount;
 * used by callers that cannot guarantee the anon_vma stays alive.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
251
252
253
254
255
256
257
258
259
260
/*
 * Control block for rmap_walk(): visits every VMA a page is mapped in.
 */
struct rmap_walk_control {
	void *arg;	/* opaque cookie passed to rmap_one() and invalid_vma() */

	/*
	 * Called for each VMA the page is mapped in.
	 * NOTE(review): returning false appears to abort the walk — confirm
	 * against the rmap_walk() implementation.
	 */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct page *page);	/* early-termination check */
	struct anon_vma *(*anon_lock)(struct page *page);	/* custom anon_vma locking, else default */
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);	/* skip VMAs the caller doesn't care about */
};
273
/* Walk all mappings of @page; _locked assumes the rmap lock is held. */
void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
276
277#else
278
/* Without an MMU there are no anon_vmas: all setup is a no-op. */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
282
/* !CONFIG_MMU stub: no rmaps exist, so nothing was referenced. */
static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}
290
/* !CONFIG_MMU: unmapping is meaningless, report failure. */
#define try_to_unmap(page, refs) false

/* !CONFIG_MMU stub: no shared PTEs to clean. */
static inline int page_mkclean(struct page *page)
{
	return 0;
}
297
298
299#endif
300
301#endif
302