// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

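/*
 * Per-VMA swap readahead state is packed into a single unsigned long
 * (vma->swap_readahead_info): the faulting address in the PAGE_MASK
 * bits, the readahead window size in the SWAP_RA_WIN_MASK bits, and a
 * saturating hit counter in the low SWAP_RA_HITS_MASK bits.
 */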
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4, to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one RCU grace period (see exit_swap_address_space()),
		 * so they cannot be freed under us during this traversal.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

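/* Hit counter driving the global (cluster-based) readahead window heuristic. */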
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	xa_lock_irq(&address_space->i_pages);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->i_pages,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so the insertion cannot
		 * fail with -EEXIST here; unwind any partial insertion.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->i_pages, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	xa_unlock_irq(&address_space->i_pages);

	return error;
}

/*
 * Like __add_to_swap_cache(), but preallocates the needed radix tree
 * nodes first, since the insertion runs under the tree lock.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->i_pages, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear SWAP_HAS_CACHE flag.
		 */
		goto fail;

	/*
	 * Normally the page will be dirtied in unmap because its pte should
	 * be dirty. A special case is MADV_FREE page: its pte could have the
	 * dirty bit cleared while the page's SwapBacked bit is still set,
	 * because clearing the dirty bit and the SwapBacked bit is not
	 * protected by a lock. For such a page, unmap will not set the dirty
	 * bit, so page reclaim will not write the page out, which can cause
	 * data corruption when the page is swapped in later. Always setting
	 * the dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

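/*
 * VMA-based readahead is used only when no rotational swap device is in
 * use (nr_rotate_swap == 0); on spinning disks the physically clustered
 * readahead of swap_cluster_readahead() avoids seeks and wins instead.
 */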
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.  Also updates readahead statistics.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

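/*
 * The workhorse behind all swapin paths: either find the page already
 * in the swap cache, or allocate a new page, insert it into the swap
 * cache and return it locked.  *new_page_allocated tells the caller
 * whether it still needs to issue the actual read.
 */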
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page = NULL, *new_page = NULL;
	struct swap_info_struct *si;
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			break;
		found_page = find_get_page(swap_address_space(entry),
					   swp_offset(entry));
		put_swap_device(si);
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of this
		 * code, or else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			SetPageWorkingset(new_page);
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocator is out of memory
 * or the swap entry is invalid.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

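/*
 * Compute the next readahead window size from the number of readahead
 * hits since the last fault (@hits), the previous window (@prev_win),
 * and whether the faulting @offset is adjacent to @prev_offset.
 */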
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_sem if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;

	start_offset = offset & ~mask;
	end_offset = offset | mask;
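	/* Offset 0 is the swap header, so never include it in readahead. */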
	if (!start_offset)
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

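/*
 * Each swap device's cache is split into one address_space per
 * SWAP_ADDRESS_SPACE_PAGES chunk, so that tree-lock contention is
 * spread across several locks instead of a single one per device.
 */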
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

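/*
 * Tear down the swapper spaces for a swap type.  The synchronize_rcu()
 * ensures any concurrent total_swapcache_pages() walker is done with
 * the old array before it is freed.
 */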
void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

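/*
 * Clamp the readahead window [lpfn, rpfn) to the VMA and to the PMD
 * (page table page) containing the faulting address, since only that
 * page table is mapped while the window's PTEs are scanned.
 */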
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

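/*
 * Work out the per-VMA readahead window for this fault: how many PTEs
 * around the faulting address to scan, and where the window starts.
 * The relevant PTEs are made available through ra_info->ptes (copied
 * on 32-bit, where the mapped page table cannot be kept around).
 */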
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if (unlikely(non_swap_entry(entry))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

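/*
 * swap_vma_readahead - read in a few pages whose virtual addresses are
 * around the fault address in the same VMA, in the hope that they will
 * be needed soon.  Returns the page for @fentry after queueing swapin.
 */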
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical
 * disk based) or VMA-based (i.e. based on the faulting virtual address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
	       swap_vma_readahead(entry, gfp_mask, vmf) :
	       swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
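/*
 * Expose a runtime switch for VMA-based readahead at
 * /sys/kernel/mm/swap/vma_ra_enabled ("true"/"false" or "1"/"0").
 */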
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif