/*
 * Common interface for making balloon pages movable by compaction.
 *
 * Provides allocation, enqueue/dequeue and (when CONFIG_BALLOON_COMPACTION
 * is enabled) isolation/putback/migration callbacks shared by memory
 * balloon drivers.
 */
8#include <linux/mm.h>
9#include <linux/slab.h>
10#include <linux/export.h>
11#include <linux/balloon_compaction.h>
12
13
14
15
16
17
18
19
20
21
22struct page *balloon_page_alloc(void)
23{
24 struct page *page = alloc_page(balloon_mapping_gfp_mask() |
25 __GFP_NOMEMALLOC | __GFP_NORETRY);
26 return page;
27}
28EXPORT_SYMBOL_GPL(balloon_page_alloc);
29
30
31
32
33
34
35
36
37
38
39
40
/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page to
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue a new allocated balloon
 * page before definitively removing it from the guest system.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point: trylock must
	 * succeed here, hence the BUG_ON.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
59
60
61
62
63
64
65
66
67
68
69
70
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued
 * page before definitively releasing it back to the guest system.
 *
 * Return: struct page for the dequeued page, or NULL if no page could be
 *	   dequeued (all pages were busy).
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver. A page that
		 * cannot be locked right now is simply skipped.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (PageIsolated(page)) {
				/* raced with isolation — leave it alone */
				unlock_page(page);
				continue;
			}
#endif
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then
		 * something went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 * The list may legitimately be empty while pages are isolated
		 * for migration, hence the isolated_pages check under the lock.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
120
121#ifdef CONFIG_BALLOON_COMPACTION
122
123bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
124
125{
126 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
127 unsigned long flags;
128
129 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
130 list_del(&page->lru);
131 b_dev_info->isolated_pages++;
132 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
133
134 return true;
135}
136
137void balloon_page_putback(struct page *page)
138{
139 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
140 unsigned long flags;
141
142 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
143 list_add(&page->lru, &b_dev_info->pages);
144 b_dev_info->isolated_pages--;
145 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
146}
147
148
149
150int balloon_page_migrate(struct address_space *mapping,
151 struct page *newpage, struct page *page,
152 enum migrate_mode mode)
153{
154 struct balloon_dev_info *balloon = balloon_page_device(page);
155
156
157
158
159
160
161 if (mode == MIGRATE_SYNC_NO_COPY)
162 return -EINVAL;
163
164 VM_BUG_ON_PAGE(!PageLocked(page), page);
165 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
166
167 return balloon->migratepage(balloon, newpage, page, mode);
168}
169
/*
 * Address-space operations shared by all balloon drivers: wires the
 * compaction/migration callbacks above into the page-migration core.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
176
177#endif
178