







#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>
13
14
15
16
17
18
19
20
21
22
23struct page *balloon_page_alloc(void)
24{
25 struct page *page = alloc_page(balloon_mapping_gfp_mask() |
26 __GFP_NOMEMALLOC | __GFP_NORETRY);
27 return page;
28}
29EXPORT_SYMBOL_GPL(balloon_page_alloc);
30
31
32
33
34
35
36
37
38
39
40
41
/**
 * balloon_page_enqueue - insert a page into the balloon's page list.
 * @b_dev_info: balloon device descriptor to insert the page into.
 * @page: new page to enqueue - presumably obtained from balloon_page_alloc().
 *
 * Takes the page lock and then the device's pages_lock (irq-safe) before
 * linking the page in and accounting a BALLOON_INFLATE event.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * The page must not be visible to anyone else at this point, so
	 * trylock_page() is expected to always succeed; holding the page
	 * lock here blocks others (e.g. compaction's isolate path) from
	 * touching the page while it is being linked into the balloon list.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
60
61
62
63
64
65
66
67
68
69
70
71
/**
 * balloon_page_dequeue - remove a page from the balloon's page list and
 *			  return it so the driver can release it.
 * @b_dev_info: balloon device descriptor to grab a page from.
 *
 * Return: address of the dequeued page, or NULL if no page could be
 * dequeued (list empty, or every listed page was locked/isolated).
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Only take a page whose lock we can grab without sleeping
		 * (we hold a spinlock); a locked page is being worked on by
		 * someone else, so just skip it.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			/*
			 * Skip pages that compaction has already isolated;
			 * they are off-limits until putback/migration.
			 */
			if (PageIsolated(page)) {
				/* raced with isolation */
				unlock_page(page);
				continue;
			}
#endif
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we could not dequeue a page even though the list is
		 * empty and no pages are isolated, balloon pages have been
		 * lost somewhere: BUG() rather than let the driver loop
		 * forever trying to release pages that no longer exist.
		 * Re-take the lock so list_empty()/isolated_pages are read
		 * consistently.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
121
122#ifdef CONFIG_BALLOON_COMPACTION
123
124bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
125
126{
127 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
128 unsigned long flags;
129
130 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
131 list_del(&page->lru);
132 b_dev_info->isolated_pages++;
133 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
134
135 return true;
136}
137
138void balloon_page_putback(struct page *page)
139{
140 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
141 unsigned long flags;
142
143 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
144 list_add(&page->lru, &b_dev_info->pages);
145 b_dev_info->isolated_pages--;
146 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
147}
148
149
150
151int balloon_page_migrate(struct address_space *mapping,
152 struct page *newpage, struct page *page,
153 enum migrate_mode mode)
154{
155 struct balloon_dev_info *balloon = balloon_page_device(page);
156
157
158
159
160
161
162 if (mode == MIGRATE_SYNC_NO_COPY)
163 return -EINVAL;
164
165 VM_BUG_ON_PAGE(!PageLocked(page), page);
166 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
167
168 return balloon->migratepage(balloon, newpage, page, mode);
169}
170
/*
 * Address-space operations wired up for balloon pages so the page migration
 * core can isolate, migrate, and put back balloon-owned pages.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
177
178#endif
179