1#ifndef _LINUX_RMAP_H
2#define _LINUX_RMAP_H
3
4
5
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/spinlock.h>
11#include <linux/memcontrol.h>
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/*
 * The anon_vma heads a list of "related" vmas that must be scanned
 * when an anonymous page mapped under this anon_vma needs to be
 * unmapped; vmas end up on the list through forking or splitting.
 */
struct anon_vma {
	struct anon_vma *root;	/* Root of this anon_vma tree */
	spinlock_t lock;	/* Serializes access to the vma list;
				 * always taken through root->lock,
				 * see anon_vma_lock() below */
	/*
	 * Taken when there is no guarantee that the vma or its page
	 * tables will stay alive for the duration of an operation.
	 * The caller that drops the last reference is responsible for
	 * tearing the anon_vma down (see put_anon_vma()).
	 */
	atomic_t refcount;
	/*
	 * Chain of anon_vma_chain structures, presumably linked via
	 * their same_anon_vma member — TODO confirm against mm/rmap.c.
	 */
	struct list_head head;
};
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * One link in the many-to-many relationship between vmas and
 * anon_vmas: after fork, a single anon_vma can be shared by several
 * processes, so each vma keeps a chain of these links to every
 * anon_vma it belongs to, and each anon_vma keeps the reverse chain
 * back to its vmas.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;	/* the vma this link belongs to */
	struct anon_vma *anon_vma;	/* the anon_vma this link points at */
	struct list_head same_vma;	/* node in the vma's list of links */
	struct list_head same_anon_vma;	/* node in anon_vma->head */
};
69
70#ifdef CONFIG_MMU
/* Take an extra reference on @anon_vma; paired with put_anon_vma(). */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}
75
76void __put_anon_vma(struct anon_vma *anon_vma);
77
/*
 * Drop a reference on @anon_vma; the thread that drops the count to
 * zero frees it via __put_anon_vma().
 */
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
83
84static inline struct anon_vma *page_anon_vma(struct page *page)
85{
86 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
87 PAGE_MAPPING_ANON)
88 return NULL;
89 return page_rmapping(page);
90}
91
92static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
93{
94 struct anon_vma *anon_vma = vma->anon_vma;
95 if (anon_vma)
96 spin_lock(&anon_vma->root->lock);
97}
98
99static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
100{
101 struct anon_vma *anon_vma = vma->anon_vma;
102 if (anon_vma)
103 spin_unlock(&anon_vma->root->lock);
104}
105
/* Lock the anon_vma tree: every member shares the root's spinlock. */
static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	spin_lock(&anon_vma->root->lock);
}
110
/* Unlock the anon_vma tree; counterpart of anon_vma_lock(). */
static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->root->lock);
}
115
116
117
118
119void anon_vma_init(void);
120int anon_vma_prepare(struct vm_area_struct *);
121void unlink_anon_vmas(struct vm_area_struct *);
122int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
123int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
124void __anon_vma_link(struct vm_area_struct *);
125
/*
 * Called when @next is being merged into @vma: both must already share
 * the same anon_vma (asserted below), so only @next's chain links
 * remain to be torn down.
 */
static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}
132
133struct anon_vma *page_get_anon_vma(struct page *page);
134
135
136
137
138void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
139void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
140void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
141 unsigned long, int);
142void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
143void page_add_file_rmap(struct page *);
144void page_remove_rmap(struct page *);
145
146void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
147 unsigned long);
148void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
149 unsigned long);
150
/* Account one more mapping of @page by bumping its _mapcount. */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
155
156
157
158
159int page_referenced(struct page *, int is_locked,
160 struct mem_cgroup *cnt, unsigned long *vm_flags);
161int page_referenced_one(struct page *, struct vm_area_struct *,
162 unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
163
/* Action selector and modifier bits for try_to_unmap(). */
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,		/* low byte holds the action */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock'd vmas */
	TTU_IGNORE_ACCESS = (1 << 9),	/* unmap even if recently referenced */
	TTU_IGNORE_HWPOISON = (1 << 10),/* ignore hwpoison — TODO confirm
					 * exact semantics in mm/rmap.c */
};
/* Extract the action (low byte) from a ttu_flags value. */
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
175
176bool is_vma_temporary_stack(struct vm_area_struct *vma);
177
178int try_to_unmap(struct page *, enum ttu_flags flags);
179int try_to_unmap_one(struct page *, struct vm_area_struct *,
180 unsigned long address, enum ttu_flags flags);
181
182
183
184
185pte_t *__page_check_address(struct page *, struct mm_struct *,
186 unsigned long, spinlock_t **, int);
187
/*
 * Find the pte that maps @page at @address in @mm; on success the pte
 * lock has been taken and is returned through @ptlp for the caller to
 * drop.  This wrapper exists purely so sparse sees the conditional
 * lock acquisition via __cond_lock(); the work is in
 * __page_check_address().
 */
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
198
199
200
201
202unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
203
204
205
206
207
208
209
210int page_mkclean(struct page *);
211
212
213
214
215
216int try_to_munlock(struct page *);
217
218
219
220
221struct anon_vma *__page_lock_anon_vma(struct page *page);
222
/*
 * Look up and lock the anon_vma of @page; returns NULL when the page
 * is not anonymous (see __page_lock_anon_vma()).  Pair a non-NULL
 * return with page_unlock_anon_vma().  The __cond_lock() calls are
 * no-ops at runtime: they only teach sparse that RCU and
 * anon_vma->root->lock are held on the success path.
 */
static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;

	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

	/* (void) silences the unused-value warning for the annotation */
	(void) __cond_lock(&anon_vma->root->lock, anon_vma);

	return anon_vma;
}
234
235void page_unlock_anon_vma(struct anon_vma *anon_vma);
236int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
237
238
239
240
241int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
242 struct vm_area_struct *, unsigned long, void *), void *arg);
243
244#else
245
246#define anon_vma_init() do {} while (0)
247#define anon_vma_prepare(vma) (0)
248#define anon_vma_link(vma) do {} while (0)
249
/*
 * !CONFIG_MMU stub: there is no reverse mapping without an MMU, so
 * report zero references and clear the cumulative vm_flags output.
 */
static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}
257
258#define try_to_unmap(page, refs) SWAP_FAIL
259
/* !CONFIG_MMU stub: nothing to clean, always reports 0. */
static inline int page_mkclean(struct page *page)
{
	return 0;
}
264
265
266#endif
267
268
269
270
/*
 * Return values of try_to_unmap() (and the TTU_MUNLOCK variant,
 * try_to_munlock()).
 */
#define SWAP_SUCCESS 0
#define SWAP_AGAIN 1
#define SWAP_FAIL 2
#define SWAP_MLOCK 3
275
276#endif
277