/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
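
/*
 * Lightweight counters tracking swap cache insertions, deletions and
 * lookups; bumped via INC_CACHE_INFO() and dumped by
 * show_swap_cache_info().
 */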
static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
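
/* Sum of the pages currently held in the swap cache of every swapfile. */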
unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}
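
/*
 * Count of recent swapin readahead pages that were actually referenced;
 * incremented in lookup_swap_cache() and sampled (and reset) by
 * swapin_nr_pages() to size the next readahead window.
 */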
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so the insertion
		 * never returns -EEXIST here.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
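
/*
 * Like __add_to_swap_cache(), but preloads radix-tree nodes first (when
 * the gfp mask allows sleeping), so the insertion under tree_lock cannot
 * fail for lack of tree nodes.
 */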
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to which a huge page's tail pages are added if it is split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
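
/*
 * Locate the page for a swap entry in the swap cache, allocating and
 * inserting a new page if none is present.  On return, *new_page_allocated
 * says whether the page was freshly added, in which case the caller is
 * expected to initiate the swap read itself.
 */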
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case,
			 * reschedule and retry rather than spinning on the
			 * raced entry until the other context makes
			 * progress.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}
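
/*
 * Decide how many pages to read around a swapin fault, based on how many
 * of the previous window's readahead pages were actually used, clamped
 * between 1 and 1 << page_cluster.
 */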
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}