#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation.  A caller that takes
	 * the reference is responsible for cleaning up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs that point to this anon_vma.
	 *
	 * This counter is used to decide whether to reuse an anon_vma
	 * instead of forking a new one.  See the comments in anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock.  So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer.  The LSB bit itself
	 * is serialized by a system-wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;		/* Interval tree of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes.  Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_LZFREE = 8,			/* lazy free mode */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
};
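
/*
 * The low byte selects one unmap mode; bits (1 << 8) and up are modifiers
 * that may be OR'ed in.  A minimal sketch of composing a flags argument
 * for try_to_unmap(), declared further down (the particular combination
 * is illustrative, not lifted from a specific caller):
 *
 *	enum ttu_flags flags = TTU_UNMAP | TTU_IGNORE_MLOCK;
 */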

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
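
/*
 * Sketch of the expected refcount discipline: a reference pins only the
 * anon_vma object itself, not any vma on its lists, and the final
 * put_anon_vma() frees it via __put_anon_vma().
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		... the vmas may be gone, but anon_vma stays valid here ...
 *		put_anon_vma(anon_vma);
 *	}
 */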

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
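
/*
 * All four helpers take the rwsem of the root anon_vma, so a single lock
 * serializes the whole tree.  Minimal read-side pairing (the walk in the
 * middle is illustrative):
 *
 *	anon_vma_lock_read(anon_vma);
 *	... walk anon_vma->rb_root under the lock ...
 *	anon_vma_unlock_read(anon_vma);
 */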

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}
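
/*
 * Rough lifecycle sketch, assuming the usual call sites in mm/mmap.c,
 * mm/memory.c and kernel/fork.c (simplified, not a literal call chain):
 *
 *	anon_vma_prepare(vma);		before the first anonymous fault
 *	anon_vma_clone(new, old);	vma split/copy shares the anon_vmas
 *	anon_vma_fork(child, parent);	fork adds its own child anon_vma
 *	unlink_anon_vmas(vma);		teardown when the vma goes away
 */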

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}
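
/*
 * Fault-path sketch (simplified from what mm/memory.c does): a freshly
 * allocated anonymous page is announced with page_add_new_anon_rmap(),
 * an already-mapped one with page_add_anon_rmap(), and pte teardown
 * drops the mapping with page_remove_rmap().  The bool says whether the
 * page is mapped as a compound (THP) page.
 *
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	...
 *	page_remove_rmap(page, false);
 */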

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

int try_to_unmap(struct page *, enum ttu_flags flags);
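
/*
 * Reclaim-style sketch (simplified from mm/vmscan.c); the SWAP_* codes
 * are defined at the bottom of this header, and SWAP_LZFREE can also be
 * returned for TTU_LZFREE:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP | TTU_BATCH_FLUSH)) {
 *	case SWAP_SUCCESS:	all ptes gone, the page can be reclaimed
 *	case SWAP_AGAIN:	transient failure, retry later
 *	case SWAP_MLOCK:	mlocked, move to the unevictable list
 *	case SWAP_FAIL:		give up on this page
 *	}
 */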

/*
 * Used by uprobes to replace a userspace page safely
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
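
/*
 * Usage sketch: on success the returned pte is mapped and *ptlp holds
 * the page table lock, so the caller must drop both, typically with
 * pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *	if (pte) {
 *		... inspect or modify the pte under ptl ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */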

/*
 * Used by idle page tracking to check if a page was referenced via page
 * tables.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
				  unsigned long address, pmd_t **pmdp,
				  pte_t **ptep, spinlock_t **ptlp);
#else
static inline bool page_check_address_transhuge(struct page *page,
				struct mm_struct *mm, unsigned long address,
				pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
{
	*ptep = page_check_address(page, mm, address, ptlp, 0);
	*pmdp = NULL;
	return !!*ptep;
}
#endif
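
/*
 * Calling-convention sketch, modelled on callers such as mm/page_idle.c
 * (simplified): on success exactly one of *ptep/*pmdp is set and *ptlp
 * is held; unmap the pte, if any, before unlocking.
 *
 *	pmd_t *pmd;
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	if (page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl)) {
 *		... pte is set for a small page, pmd for a huge one ...
 *		if (pte)
 *			pte_unmap(pte);
 *		spin_unlock(ptl);
 *	}
 */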

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: control the reverse-map traversal for a given page
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where the page is mapped
 * done: checks whether the traversal should terminate
 * anon_lock: takes the anon_vma lock in an optimized way, instead of the
 *            default page_lock_anon_vma_read()
 * invalid_vma: skips vmas that are not interesting for the walk
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
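
/*
 * Sketch of driving a reverse-map walk.  The callback and "priv" are
 * hypothetical; only rmap_walk() and the control structure come from
 * this header.  Returning SWAP_AGAIN from rmap_one() keeps the walk
 * going:
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long addr, void *arg)
 *	{
 *		... handle one mapping of page at addr in vma ...
 *		return SWAP_AGAIN;
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = priv,
 *	};
 *	rmap_walk(page, &rwc);
 */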

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
#define SWAP_LZFREE	4

#endif	/* _LINUX_RMAP_H */