/*
 *  linux/mm/swap_state.c
 *
 *  Swap cache handling: tracks pages that are backed by a swap entry
 *  while they live in the page cache.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
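
/*
 * Illustrative note (added commentary, not from the original source):
 * swap_readahead_info packs three fields into one long by reusing the
 * sub-page bits of a page-aligned address.  With 4KiB pages
 * (PAGE_SHIFT == 12, SWAP_RA_WIN_SHIFT == 6) the layout is:
 *
 *	bits 12..BITS_PER_LONG-1: page-aligned fault address
 *	bits  6..11:              readahead window size
 *	bits  0..5:               readahead hit count
 *
 * e.g. SWAP_RA_VAL(0x7f0000001000, 8, 3)
 *	== 0x7f0000001000 | (8 << 6) | 3 == 0x7f0000001203.
 */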

/* Initial readahead hits is 4, to start the readahead window small */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So it is impossible for them
		 * to be freed while we read them here under
		 * rcu_read_lock().
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so insertion can never
		 * fail with -EEXIST.  Unwind the partial insertion.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry))
		goto fail;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should
	 * be dirty. A special case is MADV_FREE page: its pte could have the
	 * dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not protected by
	 * a lock. For such a page, unmap will not set the dirty bit, so page
	 * reclaim will not write it out; that can corrupt data when the page
	 * is swapped in later. Always setting the dirty bit here avoids this.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented.  Also updates the VMA
 * readahead statistics when @vma is non-NULL.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	unsigned long ra_info;
	int win, hits, readahead;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		INC_CACHE_INFO(find_success);
		if (unlikely(PageTransCompound(page)))
			return page;
		readahead = TestClearPageReadahead(page);
		if (vma) {
			ra_info = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_info);
			hits = SWAP_RA_HITS(ra_info);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma)
				atomic_inc(&swapin_readahead_hits);
		}
	}
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocator failed to
 * allocate a page, or the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
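
/*
 * Illustrative walk-through (added commentary, not from the original
 * source): with hits == 5 and a large enough max_pages,
 * __swapin_nr_pages() computes pages = 5 + 2 = 7 and rounds up through
 * 4 -> 8, so the next window is 8 pages.  With hits == 0 it falls back
 * to a single page unless the fault is adjacent to the previous offset,
 * and the prev_win / 2 floor keeps the window from collapsing abruptly.
 */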

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
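
/*
 * Added commentary (assumption, not from the original source):
 * prev_offset and last_readahead_pages are global heuristic state
 * shared by all swapping tasks; updates race benignly, since a stale
 * value only skews the readahead window size, never correctness.
 */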

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones.
 *
 * This has been extended to use the NUMA policies from the mm
 * triggering the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
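
/*
 * Illustrative example (added commentary, not from the original
 * source): if swapin_nr_pages() returns 8 for a fault at swap offset
 * 21, then mask == 7, start_offset == (21 & ~7) == 16 and
 * end_offset == (21 | 7) == 23, so offsets 16..23 are queued in one
 * plugged batch.  The trailing read_swap_cache_async() call then
 * normally finds the faulting entry already in the swap cache.
 */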

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}
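
/*
 * Illustrative arithmetic (added commentary, not from the original
 * source): with SWAP_ADDRESS_SPACE_PAGES == 2^14 and 4KiB pages, each
 * address_space covers 64MiB of swap, so e.g. a 64GiB swap device gets
 * nr == 1024 spaces, each with its own radix tree and tree_lock to
 * spread lock contention.
 */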

void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
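
/*
 * Added commentary (assumption, not from the original source): the
 * candidate readahead window [lpfn, rpfn) is clamped to both the VMA
 * bounds and the PMD containing the fault address, so only PTEs from a
 * single page table are scanned and no neighbouring VMA is touched.
 */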

struct page *swap_readahead_detect(struct vm_fault *vmf,
				   struct vma_swap_readahead *swap_ra)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long swap_ra_info;
	struct page *page;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		swap_ra->win = 1;
		return NULL;
	}

	faddr = vmf->address;
	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry)))
		return NULL;
	page = lookup_swap_cache(entry, vma, faddr);
	if (page)
		return page;

	fpfn = PFN_DOWN(faddr);
	swap_ra_info = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
	prev_win = SWAP_RA_WIN(swap_ra_info);
	hits = SWAP_RA_HITS(swap_ra_info);
	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1)
		return NULL;

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	swap_ra->nr_pte = end - start;
	swap_ra->offset = fpfn - start;
	pte = vmf->pte - swap_ra->offset;
#ifdef CONFIG_64BIT
	swap_ra->ptes = pte;
#else
	tpte = swap_ra->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif

	return NULL;
}

struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				    struct vm_fault *vmf,
				    struct vma_swap_readahead *swap_ra)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;

	if (swap_ra->win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != swap_ra->offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     swap_ra->win == 1);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		swap_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		swap_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
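
/*
 * Usage note (added commentary, not from the original source): with
 * CONFIG_SYSFS this attribute appears as
 * /sys/kernel/mm/swap/vma_ra_enabled; writing "true"/"1" or
 * "false"/"0" toggles VMA-based swap readahead at runtime, e.g.:
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */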
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif