1#ifndef _LINUX_RMAP_H
2#define _LINUX_RMAP_H
3
4
5
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/rwsem.h>
11#include <linux/memcontrol.h>
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/*
 * The anon_vma heads the interval tree of "related" vmas that must be
 * scanned when unmapping an anonymous page (object-based reverse map).
 * NOTE(review): layout is kABI-frozen; new fields go through
 * RH_KABI_EXTEND only.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree; all
					 * locking below goes through
					 * root->rwsem */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the
					 * tree (see anon_vma_lock_*()) */
	/*
	 * Reference count, manipulated by get_anon_vma()/put_anon_vma();
	 * the thread that drops the last reference must release the
	 * structure via __put_anon_vma().
	 */
	atomic_t refcount;
	/*
	 * Interval tree of anon_vma_chains (keyed via their rb /
	 * rb_subtree_last fields) linking the vmas this anon_vma covers.
	 */
	struct rb_root rb_root;
	/* count of child anon_vmas and vmas pointing here — TODO confirm */
	RH_KABI_EXTEND(unsigned degree)
	/* parent anon_vma this one was forked/cloned from — TODO confirm */
	RH_KABI_EXTEND(struct anon_vma *parent)
};
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * One anon_vma_chain exists per (vma, anon_vma) association.  It sits on
 * two structures at once:
 * - same_vma: list of associations for one vma (list head presumably in
 *   the vm_area_struct — confirm against callers)
 * - rb / rb_subtree_last: node in anon_vma->rb_root; rb_subtree_last is
 *   the cached subtree maximum used by the interval tree
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;
	struct rb_node rb;
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	/* debug-only cache of the vma range this chain was indexed under */
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
83
/* Flags controlling try_to_unmap() and friends. */
enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_LZFREE = 8,			/* lazy free mode — see SWAP_LZFREE */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age (clear access bits) */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible;
					 * caller guarantees a final flush
					 * if necessary — TODO confirm */
};
97
98#ifdef CONFIG_MMU
/* Take a reference on an anon_vma; pairs with put_anon_vma(). */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

/* Free path for the final put_anon_vma(); defined in mm/rmap.c. */
void __put_anon_vma(struct anon_vma *anon_vma);
105
106static inline void put_anon_vma(struct anon_vma *anon_vma)
107{
108 if (atomic_dec_and_test(&anon_vma->refcount))
109 __put_anon_vma(anon_vma);
110}
111
112static inline struct anon_vma *page_anon_vma(struct page *page)
113{
114 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
115 PAGE_MAPPING_ANON)
116 return NULL;
117 return page_rmapping(page);
118}
119
120static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
121{
122 struct anon_vma *anon_vma = vma->anon_vma;
123 if (anon_vma)
124 down_write(&anon_vma->root->rwsem);
125}
126
127static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
128{
129 struct anon_vma *anon_vma = vma->anon_vma;
130 if (anon_vma)
131 up_write(&anon_vma->root->rwsem);
132}
133
/* Exclusive lock for modifying the anon_vma tree; always on the root. */
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}
138
/* Pairs with anon_vma_lock_write(). */
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}
143
/* Shared lock for walking the anon_vma tree; always on the root. */
static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}
148
/* Pairs with anon_vma_lock_read(). */
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
153
154
155
156
157
/*
 * anon_vma helper functions (defined in mm/rmap.c).
 */
void anon_vma_init(void);	/* create the anon_vma slab caches */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
163
/*
 * Called when two adjacent vmas sharing the same anon_vma are merged:
 * @next's now-redundant anon_vma chains are torn down.  The VM_BUG_ON
 * documents the precondition that both vmas share one anon_vma.
 */
static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}
170
/* Take a reference on the page's anon_vma, or return NULL. */
struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing a pte of a page.
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

/* hugetlb variants of the anon rmap helpers above */
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);
188
/* Duplicate an existing mapping of the page by bumping _mapcount. */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
193
194
195
196
/*
 * Called from mm/vmscan.c to handle paging out.
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
			unsigned long address, void *arg);

/* Unmap a page from all ptes mapping it; returns a SWAP_* code below. */
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
		     unsigned long address, void *arg);

/*
 * Backend of page_check_address(); on success the returned pte is mapped
 * and its page-table lock is held through *ptlp.
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);
211
/*
 * Find the pte mapping @page at @address in @mm.  Returns NULL when no
 * mapping is found; on success the pte pointer is returned with the
 * page-table lock held in *@ptlp — the __cond_lock() wrapper teaches
 * sparse about that conditional lock acquisition.
 * @sync: presumably selects a stricter/synchronized lookup — confirm
 * against __page_check_address() in mm/rmap.c.
 */
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
222
223
224
225
/*
 * Used by swapoff to help locate where a page is expected in a vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings (and since clean PTEs should also
 * be readonly, write-protects them too).  Returns the number of cleaned
 * PTEs — TODO confirm against mm implementation.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes mapping a poisoned page.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
248
249
250
251
252
253
254
255
256
257
258
/*
 * rmap_walk_control: tailor an rmap traversal (rmap_walk()) to the
 * caller's needs.
 *
 * arg: opaque cookie passed to rmap_one() and invalid_vma()
 * rmap_one: invoked for each vma the page is mapped in
 * done: early-termination check after each vma
 * file_nonlinear: handler for file nonlinear (VM_NONLINEAR) mappings
 * anon_lock: override for taking the anon_vma lock
 * invalid_vma: return true to skip an uninteresting vma
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	int (*done)(struct page *page);
	int (*file_nonlinear)(struct page *, struct address_space *,
			      struct vm_area_struct *vma);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

/* Walk all mappings of @page, driven by @rwc; returns a SWAP_* code. */
int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
271
272#else
273
/* !CONFIG_MMU: anon rmap does not exist; provide no-op stubs. */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
277
/* !CONFIG_MMU stub: nothing maps pages, so report zero references. */
static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}
285
/* !CONFIG_MMU stub: unmapping is impossible, always fail. */
#define try_to_unmap(page, refs) SWAP_FAIL
287
/* !CONFIG_MMU stub: no PTEs to clean. */
static inline int page_mkclean(struct page *page)
{
	return 0;
}
292
293
294#endif
295
296
297
298
/*
 * Return values of try_to_unmap() (see also the !CONFIG_MMU stub above).
 */
#define SWAP_SUCCESS 0	/* page fully unmapped */
#define SWAP_AGAIN 1	/* transient failure; caller may retry */
#define SWAP_FAIL 2	/* permanent failure */
#define SWAP_MLOCK 3	/* page is mlocked; cannot be reclaimed */
#define SWAP_LZFREE 4	/* lazy-free (TTU_LZFREE) path — TODO confirm */
304
305#endif
306