#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
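/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */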

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
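
/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */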
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
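	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation.  A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */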
	atomic_t refcount;
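
	/*
	 * Count of child anon_vmas and of VMAs which point to this
	 * anon_vma.  Used to decide whether to reuse an existing
	 * anon_vma instead of forking a new one: see anon_vma_clone().
	 */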
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */
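
	/*
	 * Interval tree of private "related" vmas.
	 *
	 * NOTE: mm_take_all_locks() sets the LSB of rb_root.rb_node
	 * _after_ taking the rwsem above, so rb_root must only be
	 * read or written with that lock held.
	 */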
	struct rb_root rb_root;
};
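
/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes.  Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows two ways to connect a vm_area_struct and an
 * anon_vma: the "same_vma" list links all the anon_vma_chains (and
 * hence anon_vmas) attached to one VMA, while the "rb" node indexes,
 * in an interval tree, the anon_vma_chains (and hence VMAs) attached
 * to one anon_vma.
 */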
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,		/* mask of the mode bits above,
					   needed by TTU_ACTION() below */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age the page */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
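
/*
 * All locking of an anon_vma hierarchy goes through the rw_semaphore
 * of its root: taking the root's lock serializes modification and
 * walking of every anon_vma in the tree, which is why the helpers
 * below always use anon_vma->root->rwsem.
 */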
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		down_write(&anon_vma->root->rwsem);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
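
/*
 * anon_vma helper functions.
 */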
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}
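
/*
 * Return the anon_vma of @page with its reference count elevated, or
 * NULL if the page is not anonymous or its anon_vma is gone.  The
 * caller must drop the reference with put_anon_vma().
 */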
struct anon_vma *page_get_anon_vma(struct page *page);
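
/*
 * rmap interfaces called when adding or removing pte of page
 */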
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
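
/*
 * Called from mm/vmscan.c to handle paging out
 */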
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
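
/*
 * Look up the pte that maps @page in @mm at @address.  On success the
 * pte is returned mapped, with its page-table lock held and *@ptlp set
 * to that lock; the caller releases both with pte_unmap_unlock().
 */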
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);
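
/*
 * Wrapper around __page_check_address() that tells sparse, via
 * __cond_lock(), that *@ptlp is acquired whenever a pte is returned.
 * A minimal usage sketch (my_examine_pte() is a hypothetical caller
 * helper):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *	if (pte) {
 *		my_examine_pte(pte);
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */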
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
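
/*
 * Used by swapoff to help locate where page is expected in vma
 */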
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
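
/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */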
int page_mkclean(struct page *);
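
/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */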
int try_to_munlock(struct page *);
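
/*
 * Called by memory-failure.c to kill processes.
 */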
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
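
/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for getting the anon_vma lock in an optimized way rather
 *            than the default
 * invalid_vma: for skipping vmas the walk is not interested in
 */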
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
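
/*
 * Minimal usage sketch (my_rmap_one() and my_state are hypothetical;
 * rmap_one() returns SWAP_AGAIN to continue the walk):
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = &my_state,
 *	};
 *
 *	rmap_walk(page, &rwc);
 */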

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */
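
/*
 * Return values of try_to_unmap
 */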
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */