/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system. The __GFP_NOMEMALLOC and __GFP_NORETRY flags keep
 * the allocation from dipping into emergency reserves or retrying hard under
 * memory pressure.
 *
 * Returns the address of the newly allocated page, or NULL on allocation
 * failure.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing it from the guest system.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
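
/*
 * Illustrative sketch of the inflate path (hypothetical driver code, not
 * taken from any real driver): balloon_page_alloc() and
 * balloon_page_enqueue() are meant to be used together, with "b_dev_info"
 * standing in for the driver's own balloon_dev_info instance:
 *
 *	struct page *page = balloon_page_alloc();
 *
 *	if (!page)
 *		return -ENOMEM;
 *	balloon_page_enqueue(b_dev_info, page);
 *	(then notify the host about page_to_pfn(page))
 */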

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued
 * balloon page before definitively releasing it back to the guest system.
 * This function returns the address of the dequeued page, or NULL if the
 * balloon's page list is temporarily empty because compaction has isolated
 * all of its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (!PagePrivate(page)) {
				/* raced with isolation */
				unlock_page(page);
				continue;
			}
#endif
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, something
		 * went wrong and some balloon pages were lost. BUG() here,
		 * otherwise the balloon driver may get stuck in an infinite
		 * loop while attempting to release all of its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
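
/*
 * Illustrative sketch of the deflate path (hypothetical driver code;
 * "b_dev_info" and "npages" are stand-ins for driver state): pages are
 * dequeued one at a time and handed back to the system; a NULL return means
 * the list is temporarily drained because compaction has isolated the
 * remaining pages, so the driver should simply retry later:
 *
 *	while (npages--) {
 *		struct page *page = balloon_page_dequeue(b_dev_info);
 *
 *		if (!page)
 *			break;
 *		(notify the host, then release the page)
 *		__free_page(page);
 *	}
 */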

#ifdef CONFIG_BALLOON_COMPACTION

/*
 * Detach a page from the balloon list for migration. Called with the page
 * locked; takes pages_lock to keep the list and the isolated_pages counter
 * consistent.
 */
static inline void __isolate_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	ClearPagePrivate(page);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/*
 * Reinsert a previously isolated page into the balloon list, reversing
 * __isolate_balloon_page(). Called with the page locked.
 */
static inline void __putback_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	SetPagePrivate(page);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a balloon page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of releasing
	 * this page, thus avoiding a nasty leak.
	 */
	if (likely(get_page_unless_zero(page))) {
		/*
		 * As balloon pages are not isolated from the LRU lists,
		 * concurrent compaction threads can race against page
		 * migration functions as well as against the balloon driver
		 * releasing a page.
		 *
		 * To avoid (wrongly) re-isolating an already isolated balloon
		 * page while it is under migration, and to avoid isolating
		 * pages that are being released by the balloon driver, make
		 * sure we hold the page lock before proceeding with the
		 * isolation steps.
		 */
		if (likely(trylock_page(page))) {
			/*
			 * A ballooned page, by default, has PagePrivate set.
			 * __isolate_balloon_page() clears that flag, which
			 * prevents concurrent compaction threads from
			 * re-isolating an already isolated balloon page.
			 */
			if (balloon_page_movable(page)) {
				__isolate_balloon_page(page);
				unlock_page(page);
				return true;
			}
			unlock_page(page);
		}
		put_page(page);
	}
	return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
	/*
	 * lock_page() stabilizes the page and prevents races against
	 * concurrent isolation threads attempting to re-isolate it.
	 */
	lock_page(page);

	if (__is_movable_balloon_page(page)) {
		__putback_balloon_page(page);
		/* drop the extra refcount taken for page isolation */
		put_page(page);
	} else {
		WARN_ON(1);
		dump_page(page, "not movable balloon page");
	}
	unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
			 struct page *page, enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);
	int rc = -EAGAIN;

	/*
	 * The no-copy case (MIGRATE_SYNC_NO_COPY) cannot easily be supported
	 * here, so reject it; it is unlikely to be used with balloon pages
	 * anyway.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	/*
	 * Block others from accessing the 'newpage' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'newpage' at this point.
	 */
	BUG_ON(!trylock_page(newpage));

	if (WARN_ON(!__is_movable_balloon_page(page))) {
		dump_page(page, "not movable balloon page");
		unlock_page(newpage);
		return rc;
	}

	if (balloon && balloon->migratepage)
		rc = balloon->migratepage(balloon, newpage, page, mode);

	unlock_page(newpage);
	return rc;
}
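
/*
 * Illustrative sketch of a driver-side ->migratepage callback (hypothetical
 * code, not copied from a real driver): the callback is expected to account
 * the new page, notify the host, and drop the reference that was taken when
 * the old page was isolated:
 *
 *	static int example_migratepage(struct balloon_dev_info *b_dev_info,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 *		balloon_page_insert(b_dev_info, newpage);
 *		b_dev_info->isolated_pages--;
 *		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 *
 *		(notify the host: inflate newpage, deflate page)
 *
 *		balloon_page_delete(page);
 *		put_page(page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */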
#endif /* CONFIG_BALLOON_COMPACTION */