1#ifndef _LINUX_RMAP_H
2#define _LINUX_RMAP_H
3
4
5
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/rwsem.h>
11#include <linux/memcontrol.h>
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/*
 * The anon_vma heads an interval tree of "related" private VMAs that
 * must be scanned when an anonymous page belonging to this anon_vma
 * needs to be unmapped or otherwise found via reverse mapping.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* taken on the root; write for
					 * modification, read for walking
					 * (see anon_vma_lock_*() below) */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release
	 * (see put_anon_vma()/__put_anon_vma()).
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 * NOTE(review): presumably used to decide whether an existing
	 * anon_vma can be reused instead of allocating a new one on fork;
	 * the users live in mm/rmap.c, not visible here.
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * Interval tree of private "related" vmas, keyed by the
	 * anon_vma_chain.rb nodes declared below.
	 */
	struct rb_root rb_root;
};
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * Link structure connecting one VMA with one anon_vma it can be
 * reached from:
 *  - same_vma chains together all anon_vma_chains of a single VMA;
 *  - rb/rb_subtree_last form this chain's node in the anon_vma's
 *    interval tree (anon_vma.rb_root).
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;	/* the VMA this chain belongs to */
	struct anon_vma *anon_vma;	/* the anon_vma the VMA is linked into */
	struct list_head same_vma;	/* list of chains anchored at the VMA */
	struct rb_node rb;		/* node in anon_vma->rb_root */
	unsigned long rb_subtree_last;	/* interval-tree augmentation value */
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;	/* debug: detect stale tree entries */
#endif
};
83
/*
 * Flags for try_to_unmap() and friends.
 *
 * The low byte selects the action to perform; the high bits are
 * modifiers that tweak how the action is carried out.
 */
enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_LZFREE = 8,			/* lazy free mode */
	TTU_SPLIT_HUGE_PMD = 16,	/* split huge PMD if any */

	/*
	 * Mask covering the action values above.  This was missing,
	 * which left the TTU_ACTION() macro below referencing an
	 * undefined identifier (a compile error at any use site).
	 */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
	TTU_BATCH_FLUSH = (1 << 11),	/* batch TLB flushes where possible;
					 * caller does the final flush */

	TTU_RMAP_LOCKED = (1 << 12)	/* do not grab rmap lock:
					 * caller holds it */

};
100
101#ifdef CONFIG_MMU
/* Take an extra reference on @anon_vma. */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}
106
107void __put_anon_vma(struct anon_vma *anon_vma);
108
/*
 * Drop a reference on @anon_vma; the caller that drops the last
 * reference frees it via __put_anon_vma().
 */
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
114
/* Lock the anon_vma tree for modification (root rwsem, write side). */
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}
119
/* Release the write side of the root anon_vma's rwsem. */
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}
124
/* Lock the anon_vma tree for walking (root rwsem, read side). */
static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}
129
/* Release the read side of the root anon_vma's rwsem. */
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
134
135
136
137
138
139void anon_vma_init(void);
140int anon_vma_prepare(struct vm_area_struct *);
141void unlink_anon_vmas(struct vm_area_struct *);
142int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
143int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
144
/*
 * Called when @vma and @next are being merged: both must already share
 * the same anon_vma (asserted under DEBUG_VM), so @next's now-redundant
 * anon_vma chains can simply be unlinked.
 */
static inline void anon_vma_merge(struct vm_area_struct *vma,
		struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}
151
152struct anon_vma *page_get_anon_vma(struct page *page);
153
154
/*
 * Bit flags for the 'int' flags argument of do_page_add_anon_rmap().
 * NOTE(review): names suggest "mapped exclusively" and "compound page";
 * the authoritative semantics live in mm/rmap.c — confirm there.
 */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02
157
158
159
160
161void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
162void page_add_anon_rmap(struct page *, struct vm_area_struct *,
163 unsigned long, bool);
164void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
165 unsigned long, int);
166void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
167 unsigned long, bool);
168void page_add_file_rmap(struct page *);
169void page_remove_rmap(struct page *, bool);
170
171void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
172 unsigned long);
173void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
174 unsigned long);
175
/*
 * Duplicate an existing rmap entry for @page: bump the compound
 * mapcount for a compound mapping, the per-page _mapcount otherwise.
 */
static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}
180
181
182
183
184int page_referenced(struct page *, int is_locked,
185 struct mem_cgroup *memcg, unsigned long *vm_flags);
186
/* Extract the action bits (low byte) from a ttu_flags value. */
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
188
189int try_to_unmap(struct page *, enum ttu_flags flags);
190
191
192
193
194pte_t *__page_check_address(struct page *, struct mm_struct *,
195 unsigned long, spinlock_t **, int);
196
/*
 * Find the pte mapping @page in @mm at @address.  Thin wrapper around
 * __page_check_address() whose only job is the sparse __cond_lock()
 * annotation: when a pte is returned, *ptlp has been acquired by the
 * callee and must be unlocked by the caller.  Returns NULL if the page
 * is not mapped there.
 */
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
		unsigned long address,
		spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
		ptlp, sync));
	return ptep;
}
207
208
209
210
211
/*
 * Find the pte or pmd mapping @page in @mm at @address; returns true
 * and fills exactly one of *ptep/*pmdp (plus the held *ptlp) on success.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
		unsigned long address, pmd_t **pmdp,
		pte_t **ptep, spinlock_t **ptlp);
#else
/* Without THP a page can only be mapped by a normal pte. */
static inline bool page_check_address_transhuge(struct page *page,
		struct mm_struct *mm, unsigned long address,
		pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
{
	*ptep = page_check_address(page, mm, address, ptlp, 0);
	*pmdp = NULL;
	return !!*ptep;
}
#endif
226
227
228
229
230unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
231
232
233
234
235
236
237
238int page_mkclean(struct page *);
239
240
241
242
243
244int try_to_munlock(struct page *);
245
246void remove_migration_ptes(struct page *old, struct page *new, bool locked);
247
248
249
250
251struct anon_vma *page_lock_anon_vma_read(struct page *page);
252void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
253int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
254
255
256
257
258
259
260
261
262
263
/*
 * rmap_walk_control: controls an rmap_walk() traversal.
 *
 * arg: opaque cookie passed to rmap_one() and invalid_vma()
 * rmap_one: invoked for each vma the page is mapped in; its return
 *	value steers the walk (see the SWAP_* codes at the end of
 *	this header)
 * done: checked after each vma to decide early termination
 * anon_lock: optional override for taking the anon_vma lock
 * invalid_vma: optional filter; return true to skip a vma
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
272
273int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
274int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
275
276#else
277
/* !CONFIG_MMU stubs: anonymous rmap does not exist without an MMU. */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
281
/* !CONFIG_MMU stub: no reverse map, so report zero references. */
static inline int page_referenced(struct page *page, int is_locked,
		struct mem_cgroup *memcg,
		unsigned long *vm_flags)
{
	*vm_flags = 0;	/* no vma flags to aggregate */
	return 0;
}
289
/* !CONFIG_MMU stub: unmapping via rmap is impossible, always fail. */
#define try_to_unmap(page, refs) SWAP_FAIL
291
/* !CONFIG_MMU stub: nothing is mapped writably, no ptes cleaned. */
static inline int page_mkclean(struct page *page)
{
	return 0;
}
296
297
298#endif
299
300
301
302
/*
 * Return values of try_to_unmap() and of rmap_walk_control->rmap_one()
 * callbacks (SWAP_AGAIN continues the walk; the others terminate it).
 */
#define SWAP_SUCCESS 0
#define SWAP_AGAIN 1
#define SWAP_FAIL 2
#define SWAP_MLOCK 3
#define SWAP_LZFREE 4
308
309#endif
310