#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H

/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be found.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Interval tree of private "related" vmas: the rb_root of the
	 * anon_vma_chains which link all the vmas that have pages
	 * included in this anon_vma's interval tree.
	 */
	struct rb_root rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};

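/*
 * Usage sketch (editorial illustration, not from the original header):
 * callers pick one TTU_* action and may OR in TTU_IGNORE_* modifier bits,
 * roughly as the page-migration path does:
 *
 *	enum ttu_flags flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *			       TTU_IGNORE_ACCESS;
 *
 * TTU_ACTION(flags), defined below, recovers just the action part.
 */
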
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

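/*
 * Usage sketch (editorial illustration): a caller that cannot guarantee the
 * mapping will stay around takes a reference and is responsible for the
 * final put, e.g. paired with page_get_anon_vma() declared below:
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *	if (!anon_vma)
 *		return;
 *	... use anon_vma ...
 *	put_anon_vma(anon_vma);
 */
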
static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		down_write(&anon_vma->root->rwsem);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}

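/*
 * Usage sketch (editorial illustration): all of the helpers above lock the
 * root anon_vma's rwsem, so a read-side walk has this shape (error handling
 * omitted):
 *
 *	anon_vma_lock_read(anon_vma);
 *	... walk anon_vma->rb_root and visit each related vma ...
 *	anon_vma_unlock_read(anon_vma);
 *
 * Paths that modify the tree (fork, mmap, unlink) use the _write variants.
 */
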
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
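
/*
 * Usage sketch (editorial illustration): a fault handler instantiating a new
 * anonymous page prepares the vma's anon_vma first, then adds the rmap entry
 * once the pte is in place, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 *
 * Fork-time copies of already-mapped ptes use page_dup_rmap() instead.
 */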

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags);

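/*
 * Usage sketch (editorial illustration): reclaim asks how many ptes recently
 * referenced the page; the union of the mapping vmas' vm_flags comes back
 * through the out-parameter:
 *
 *	unsigned long vm_flags;
 *	int referenced = page_referenced(page, 1, memcg, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		... treat the page as unevictable ...
 */
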
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
		     unsigned long address, enum ttu_flags flags);

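/*
 * Usage sketch (editorial illustration): callers interpret the SWAP_* return
 * codes defined at the bottom of this header, roughly as vmscan does:
 *
 *	switch (try_to_unmap(page, ttu_flags)) {
 *	case SWAP_SUCCESS:
 *		... all ptes are gone, the page can be paged out ...
 *	case SWAP_AGAIN:
 *		... try again later ...
 *	case SWAP_MLOCK:
 *		... page is mlocked, move it to the unevictable list ...
 *	case SWAP_FAIL:
 *		... give up on this page ...
 *	}
 */
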
/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}

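/*
 * Usage sketch (editorial illustration): on success the returned pte is
 * mapped and its page-table lock is held, so the caller releases both:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return SWAP_AGAIN;
 *	... inspect or modify the pte ...
 *	pte_unmap_unlock(pte, ptl);
 */
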
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings
 * (and since clean PTEs should also be readonly, write protects them too).
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);

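/*
 * Usage sketch (editorial illustration): rmap_walk() calls rmap_one() for
 * each (vma, address) that maps the page and stops as soon as the callback
 * returns anything other than SWAP_AGAIN.  A hypothetical callback:
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long addr, void *arg)
 *	{
 *		... examine the mapping of page at addr in vma ...
 *		return SWAP_AGAIN;
 *	}
 *
 *	rmap_walk(page, my_rmap_one, NULL);
 */
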
#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */