// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>
#include <linux/completion.h>

#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_pages() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_pages() doesn't need to look at the full ones.
 * It uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);
100
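/*
 * Look up the swap_info_struct for a swap type.  Returns NULL if the type
 * is out of range or the corresponding device has not been set up yet.
 */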
101static struct swap_info_struct *swap_type_to_swap_info(int type)
102{
103 if (type >= MAX_SWAPFILES)
104 return NULL;
105
106 return READ_ONCE(swap_info[type]);
107}
108
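/* Usage count of a swap entry, with the SWAP_HAS_CACHE flag masked off. */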
109static inline unsigned char swap_count(unsigned char ent)
110{
111 return ent & ~SWAP_HAS_CACHE;
112}

/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY		0x1
/*
 * Reclaim the swap entry if there are no more mappings of the
 * corresponding page
 */
#define TTRS_UNMAPPED		0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL		0x4

/* returns 1 if swap entry is freed */
125static int __try_to_reclaim_swap(struct swap_info_struct *si,
126 unsigned long offset, unsigned long flags)
127{
128 swp_entry_t entry = swp_entry(si->type, offset);
129 struct page *page;
130 int ret = 0;
131
132 page = find_get_page(swap_address_space(entry), offset);
133 if (!page)
134 return 0;
135
	/*
	 * When this is reached from scan_swap_map_slots() during reclaim,
	 * the caller may already hold a page lock, so only trylock the
	 * page here to avoid deadlock.  Ordinary callers should use
	 * try_to_free_swap() with an explicit lock_page() instead.
	 */
142 if (trylock_page(page)) {
143 if ((flags & TTRS_ANYWAY) ||
144 ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
145 ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
146 ret = try_to_free_swap(page);
147 unlock_page(page);
148 }
149 put_page(page);
150 return ret;
151}
152
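/* Helpers to walk the rbtree of swap extents in disk order. */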
153static inline struct swap_extent *first_se(struct swap_info_struct *sis)
154{
155 struct rb_node *rb = rb_first(&sis->swap_extent_root);
156 return rb_entry(rb, struct swap_extent, rb_node);
157}
158
159static inline struct swap_extent *next_se(struct swap_extent *se)
160{
161 struct rb_node *rb = rb_next(&se->rb_node);
162 return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
163}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
169static int discard_swap(struct swap_info_struct *si)
170{
171 struct swap_extent *se;
172 sector_t start_block;
173 sector_t nr_blocks;
174 int err = 0;

	/* Do not discard the swap header page! */
177 se = first_se(si);
178 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
179 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
180 if (nr_blocks) {
181 err = blkdev_issue_discard(si->bdev, start_block,
182 nr_blocks, GFP_KERNEL, 0);
183 if (err)
184 return err;
185 cond_resched();
186 }
187
188 for (se = next_se(se); se; se = next_se(se)) {
189 start_block = se->start_block << (PAGE_SHIFT - 9);
190 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
191
192 err = blkdev_issue_discard(si->bdev, start_block,
193 nr_blocks, GFP_KERNEL, 0);
194 if (err)
195 break;
196
197 cond_resched();
198 }
199 return err;
200}
201
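/*
 * Find the swap extent covering @offset.  The extent tree covers every
 * valid offset of the device, so failing to find one is a bug.
 */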
202static struct swap_extent *
203offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
204{
205 struct swap_extent *se;
206 struct rb_node *rb;
207
208 rb = sis->swap_extent_root.rb_node;
209 while (rb) {
210 se = rb_entry(rb, struct swap_extent, rb_node);
211 if (offset < se->start_page)
212 rb = rb->rb_left;
213 else if (offset >= se->start_page + se->nr_pages)
214 rb = rb->rb_right;
215 else
216 return se;
217 }
218
219 BUG();
220}
221
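/* Return the 512-byte sector on the swap device backing this swapcache page. */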
222sector_t swap_page_sector(struct page *page)
223{
224 struct swap_info_struct *sis = page_swap_info(page);
225 struct swap_extent *se;
226 sector_t sector;
227 pgoff_t offset;
228
229 offset = __page_file_index(page);
230 se = offset_to_swap_extent(sis, offset);
231 sector = se->start_block + (offset - se->start_page);
232 return sector << (PAGE_SHIFT - 9);
233}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
239static void discard_swap_cluster(struct swap_info_struct *si,
240 pgoff_t start_page, pgoff_t nr_pages)
241{
242 struct swap_extent *se = offset_to_swap_extent(si, start_page);
243
244 while (nr_pages) {
245 pgoff_t offset = start_page - se->start_page;
246 sector_t start_block = se->start_block + offset;
247 sector_t nr_blocks = se->nr_pages - offset;
248
249 if (nr_blocks > nr_pages)
250 nr_blocks = nr_pages;
251 start_page += nr_blocks;
252 nr_pages -= nr_blocks;
253
254 start_block <<= PAGE_SHIFT - 9;
255 nr_blocks <<= PAGE_SHIFT - 9;
256 if (blkdev_issue_discard(si->bdev, start_block,
257 nr_blocks, GFP_NOIO, 0))
258 break;
259
260 se = next_se(se);
261 }
262}
263
264#ifdef CONFIG_THP_SWAP
265#define SWAPFILE_CLUSTER HPAGE_PMD_NR
266
267#define swap_entry_size(size) (size)
268#else
269#define SWAPFILE_CLUSTER 256

/*
 * Define swap_entry_size() as a constant so that the compiler can
 * optimize out some code when !CONFIG_THP_SWAP.
 */
275#define swap_entry_size(size) 1
276#endif
277#define LATENCY_LIMIT 256
278
279static inline void cluster_set_flag(struct swap_cluster_info *info,
280 unsigned int flag)
281{
282 info->flags = flag;
283}
284
285static inline unsigned int cluster_count(struct swap_cluster_info *info)
286{
287 return info->data;
288}
289
290static inline void cluster_set_count(struct swap_cluster_info *info,
291 unsigned int c)
292{
293 info->data = c;
294}
295
296static inline void cluster_set_count_flag(struct swap_cluster_info *info,
297 unsigned int c, unsigned int f)
298{
299 info->flags = f;
300 info->data = c;
301}
302
303static inline unsigned int cluster_next(struct swap_cluster_info *info)
304{
305 return info->data;
306}
307
308static inline void cluster_set_next(struct swap_cluster_info *info,
309 unsigned int n)
310{
311 info->data = n;
312}
313
314static inline void cluster_set_next_flag(struct swap_cluster_info *info,
315 unsigned int n, unsigned int f)
316{
317 info->flags = f;
318 info->data = n;
319}
320
321static inline bool cluster_is_free(struct swap_cluster_info *info)
322{
323 return info->flags & CLUSTER_FLAG_FREE;
324}
325
326static inline bool cluster_is_null(struct swap_cluster_info *info)
327{
328 return info->flags & CLUSTER_FLAG_NEXT_NULL;
329}
330
331static inline void cluster_set_null(struct swap_cluster_info *info)
332{
333 info->flags = CLUSTER_FLAG_NEXT_NULL;
334 info->data = 0;
335}
336
337static inline bool cluster_is_huge(struct swap_cluster_info *info)
338{
339 if (IS_ENABLED(CONFIG_THP_SWAP))
340 return info->flags & CLUSTER_FLAG_HUGE;
341 return false;
342}
343
344static inline void cluster_clear_huge(struct swap_cluster_info *info)
345{
346 info->flags &= ~CLUSTER_FLAG_HUGE;
347}
348
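/*
 * Lock the cluster covering @offset when per-cluster locking (cluster_info)
 * is in use; returns NULL for devices without cluster_info.
 */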
349static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
350 unsigned long offset)
351{
352 struct swap_cluster_info *ci;
353
354 ci = si->cluster_info;
355 if (ci) {
356 ci += offset / SWAPFILE_CLUSTER;
357 spin_lock(&ci->lock);
358 }
359 return ci;
360}
361
362static inline void unlock_cluster(struct swap_cluster_info *ci)
363{
364 if (ci)
365 spin_unlock(&ci->lock);
366}

/*
 * Determine the locking method in use for this device.  Return
 * swap_cluster_info if SSD-style cluster-based locking is in place.
 */
372static inline struct swap_cluster_info *lock_cluster_or_swap_info(
373 struct swap_info_struct *si, unsigned long offset)
374{
375 struct swap_cluster_info *ci;
376
377
378 ci = lock_cluster(si, offset);
379
380 if (!ci)
381 spin_lock(&si->lock);
382
383 return ci;
384}
385
386static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
387 struct swap_cluster_info *ci)
388{
389 if (ci)
390 unlock_cluster(ci);
391 else
392 spin_unlock(&si->lock);
393}
394
395static inline bool cluster_list_empty(struct swap_cluster_list *list)
396{
397 return cluster_is_null(&list->head);
398}
399
400static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
401{
402 return cluster_next(&list->head);
403}
404
405static void cluster_list_init(struct swap_cluster_list *list)
406{
407 cluster_set_null(&list->head);
408 cluster_set_null(&list->tail);
409}
410
411static void cluster_list_add_tail(struct swap_cluster_list *list,
412 struct swap_cluster_info *ci,
413 unsigned int idx)
414{
415 if (cluster_list_empty(list)) {
416 cluster_set_next_flag(&list->head, idx, 0);
417 cluster_set_next_flag(&list->tail, idx, 0);
418 } else {
419 struct swap_cluster_info *ci_tail;
420 unsigned int tail = cluster_next(&list->tail);
421

		/*
		 * Nested cluster lock, but both cluster locks are
		 * only acquired when we held swap_info_struct->lock
		 */
426 ci_tail = ci + tail;
427 spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
428 cluster_set_next(ci_tail, idx);
429 spin_unlock(&ci_tail->lock);
430 cluster_set_next_flag(&list->tail, idx, 0);
431 }
432}
433
434static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
435 struct swap_cluster_info *ci)
436{
437 unsigned int idx;
438
439 idx = cluster_next(&list->head);
440 if (cluster_next(&list->tail) == idx) {
441 cluster_set_null(&list->head);
442 cluster_set_null(&list->tail);
443 } else
444 cluster_set_next_flag(&list->head,
445 cluster_next(&ci[idx]), 0);
446
447 return idx;
448}

/* Add a cluster to the discard list and schedule it to do discard */
451static void swap_cluster_schedule_discard(struct swap_info_struct *si,
452 unsigned int idx)
453{
	/*
	 * If scan_swap_map_slots() can't find a free cluster, it will check
	 * si->swap_map directly.  To make sure the discarding cluster isn't
	 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
	 * They will be cleared again after the discard is done.
	 */
460 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
461 SWAP_MAP_BAD, SWAPFILE_CLUSTER);
462
463 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
464
465 schedule_work(&si->discard_work);
466}
467
468static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
469{
470 struct swap_cluster_info *ci = si->cluster_info;
471
472 cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
473 cluster_list_add_tail(&si->free_clusters, ci, idx);
474}

/*
 * Doing discard actually.  After a cluster discard is finished, the cluster
 * will be added to the free cluster list.  Caller should hold si->lock.
 */
480static void swap_do_scheduled_discard(struct swap_info_struct *si)
481{
482 struct swap_cluster_info *info, *ci;
483 unsigned int idx;
484
485 info = si->cluster_info;
486
487 while (!cluster_list_empty(&si->discard_clusters)) {
488 idx = cluster_list_del_first(&si->discard_clusters, info);
489 spin_unlock(&si->lock);
490
491 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
492 SWAPFILE_CLUSTER);
493
494 spin_lock(&si->lock);
495 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
496 __free_cluster(si, idx);
497 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
498 0, SWAPFILE_CLUSTER);
499 unlock_cluster(ci);
500 }
501}
502
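/* Work item issuing the discards queued by swap_cluster_schedule_discard(). */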
503static void swap_discard_work(struct work_struct *work)
504{
505 struct swap_info_struct *si;
506
507 si = container_of(work, struct swap_info_struct, discard_work);
508
509 spin_lock(&si->lock);
510 swap_do_scheduled_discard(si);
511 spin_unlock(&si->lock);
512}
513
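/*
 * percpu_ref release callback for si->users: signal si->comp so that
 * swapoff, which waits on it, can proceed.
 */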
514static void swap_users_ref_free(struct percpu_ref *ref)
515{
516 struct swap_info_struct *si;
517
518 si = container_of(ref, struct swap_info_struct, users);
519 complete(&si->comp);
520}
521
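/*
 * Remove cluster @idx (which must be first on the free list) from the free
 * list and reset its counter, ready for allocation.
 */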
522static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
523{
524 struct swap_cluster_info *ci = si->cluster_info;
525
526 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
527 cluster_list_del_first(&si->free_clusters, ci);
528 cluster_set_count_flag(ci + idx, 0, 0);
529}
530
531static void free_cluster(struct swap_info_struct *si, unsigned long idx)
532{
533 struct swap_cluster_info *ci = si->cluster_info + idx;
534
535 VM_BUG_ON(cluster_count(ci) != 0);
536
537
538
539
540
541 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
542 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
543 swap_cluster_schedule_discard(si, idx);
544 return;
545 }
546
547 __free_cluster(si, idx);
548}

/*
 * The cluster corresponding to page_nr will be used.  The cluster will be
 * removed from the free cluster list and its usage counter will be increased.
 */
554static void inc_cluster_info_page(struct swap_info_struct *p,
555 struct swap_cluster_info *cluster_info, unsigned long page_nr)
556{
557 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
558
559 if (!cluster_info)
560 return;
561 if (cluster_is_free(&cluster_info[idx]))
562 alloc_cluster(p, idx);
563
564 VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
565 cluster_set_count(&cluster_info[idx],
566 cluster_count(&cluster_info[idx]) + 1);
567}

/*
 * The cluster corresponding to page_nr decreases one usage.  If the usage
 * counter becomes 0, which means no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
574static void dec_cluster_info_page(struct swap_info_struct *p,
575 struct swap_cluster_info *cluster_info, unsigned long page_nr)
576{
577 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
578
579 if (!cluster_info)
580 return;
581
582 VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
583 cluster_set_count(&cluster_info[idx],
584 cluster_count(&cluster_info[idx]) - 1);
585
586 if (cluster_count(&cluster_info[idx]) == 0)
587 free_cluster(p, idx);
588}

/*
 * It's possible scan_swap_map_slots() uses a free cluster in the middle of
 * the free cluster list.  Avoid such abuse to avoid list corruption.
 */
594static bool
595scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
596 unsigned long offset)
597{
598 struct percpu_cluster *percpu_cluster;
599 bool conflict;
600
601 offset /= SWAPFILE_CLUSTER;
602 conflict = !cluster_list_empty(&si->free_clusters) &&
603 offset != cluster_list_first(&si->free_clusters) &&
604 cluster_is_free(&si->cluster_info[offset]);
605
606 if (!conflict)
607 return false;
608
609 percpu_cluster = this_cpu_ptr(si->percpu_cluster);
610 cluster_set_null(&percpu_cluster->index);
611 return true;
612}

/*
 * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
 * This might involve allocating a new cluster for the current CPU too.
 */
618static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
619 unsigned long *offset, unsigned long *scan_base)
620{
621 struct percpu_cluster *cluster;
622 struct swap_cluster_info *ci;
623 unsigned long tmp, max;
624
625new_cluster:
626 cluster = this_cpu_ptr(si->percpu_cluster);
627 if (cluster_is_null(&cluster->index)) {
628 if (!cluster_list_empty(&si->free_clusters)) {
629 cluster->index = si->free_clusters.head;
630 cluster->next = cluster_next(&cluster->index) *
631 SWAPFILE_CLUSTER;
632 } else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * We don't have a free cluster but there are clusters
			 * waiting to be discarded; do the discard now to
			 * reclaim them, then restart the scan from the
			 * per-cpu base.
			 */
638 swap_do_scheduled_discard(si);
639 *scan_base = this_cpu_read(*si->cluster_next_cpu);
640 *offset = *scan_base;
641 goto new_cluster;
642 } else
643 return false;
644 }

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster;
	 * check if there is still a free entry in the cluster.
	 */
650 tmp = cluster->next;
651 max = min_t(unsigned long, si->max,
652 (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
653 if (tmp < max) {
654 ci = lock_cluster(si, tmp);
655 while (tmp < max) {
656 if (!si->swap_map[tmp])
657 break;
658 tmp++;
659 }
660 unlock_cluster(ci);
661 }
662 if (tmp >= max) {
663 cluster_set_null(&cluster->index);
664 goto new_cluster;
665 }
666 cluster->next = tmp + 1;
667 *offset = tmp;
668 *scan_base = tmp;
669 return true;
670}
671
672static void __del_from_avail_list(struct swap_info_struct *p)
673{
674 int nid;
675
676 for_each_node(nid)
677 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
678}
679
680static void del_from_avail_list(struct swap_info_struct *p)
681{
682 spin_lock(&swap_avail_lock);
683 __del_from_avail_list(p);
684 spin_unlock(&swap_avail_lock);
685}
686
687static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
688 unsigned int nr_entries)
689{
690 unsigned int end = offset + nr_entries - 1;
691
692 if (offset == si->lowest_bit)
693 si->lowest_bit += nr_entries;
694 if (end == si->highest_bit)
695 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
696 si->inuse_pages += nr_entries;
697 if (si->inuse_pages == si->pages) {
698 si->lowest_bit = si->max;
699 si->highest_bit = 0;
700 del_from_avail_list(si);
701 }
702}
703
704static void add_to_avail_list(struct swap_info_struct *p)
705{
706 int nid;
707
708 spin_lock(&swap_avail_lock);
709 for_each_node(nid) {
710 WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
711 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
712 }
713 spin_unlock(&swap_avail_lock);
714}
715
716static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
717 unsigned int nr_entries)
718{
719 unsigned long begin = offset;
720 unsigned long end = offset + nr_entries - 1;
721 void (*swap_slot_free_notify)(struct block_device *, unsigned long);
722
723 if (offset < si->lowest_bit)
724 si->lowest_bit = offset;
725 if (end > si->highest_bit) {
726 bool was_full = !si->highest_bit;
727
728 WRITE_ONCE(si->highest_bit, end);
729 if (was_full && (si->flags & SWP_WRITEOK))
730 add_to_avail_list(si);
731 }
732 atomic_long_add(nr_entries, &nr_swap_pages);
733 si->inuse_pages -= nr_entries;
734 if (si->flags & SWP_BLKDEV)
735 swap_slot_free_notify =
736 si->bdev->bd_disk->fops->swap_slot_free_notify;
737 else
738 swap_slot_free_notify = NULL;
739 while (offset <= end) {
740 arch_swap_invalidate_page(si->type, offset);
741 frontswap_invalidate_page(si->type, offset);
742 if (swap_slot_free_notify)
743 swap_slot_free_notify(si->bdev, offset);
744 offset++;
745 }
746 clear_shadow_from_swap_cache(si->type, begin, end);
747}
748
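/*
 * Record where the next scan for free slots should start.  On SSDs this is
 * kept per CPU, and a CPU that crosses into a different swap address space
 * region is restarted at a random, aligned position so CPUs spread out
 * across the device.
 */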
749static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
750{
751 unsigned long prev;
752
753 if (!(si->flags & SWP_SOLIDSTATE)) {
754 si->cluster_next = next;
755 return;
756 }
757
758 prev = this_cpu_read(*si->cluster_next_cpu);
759
760
761
762
763
764 if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
765 (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
766
767 if (si->highest_bit <= si->lowest_bit)
768 return;
769 next = si->lowest_bit +
770 prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
771 next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
772 next = max_t(unsigned int, next, si->lowest_bit);
773 }
774 this_cpu_write(*si->cluster_next_cpu, next);
775}
776
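/*
 * Scan the swap map for up to @nr free slots, mark them with @usage and
 * return them in @slots[].  Called with si->lock held, which may be
 * dropped and retaken.  Returns the number of slots allocated.
 */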
777static int scan_swap_map_slots(struct swap_info_struct *si,
778 unsigned char usage, int nr,
779 swp_entry_t slots[])
780{
781 struct swap_cluster_info *ci;
782 unsigned long offset;
783 unsigned long scan_base;
784 unsigned long last_in_cluster = 0;
785 int latency_ration = LATENCY_LIMIT;
786 int n_ret = 0;
787 bool scanned_many = false;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages; on SSDs the
	 * allocations are instead spread over the whole partition.
	 */

	si->flags += SWP_SCANNING;
	/*
	 * Use percpu scan base for SSD to reduce lock contention on
	 * cluster and swap cache.  For HDD, sequential access is more
	 * important.
	 */
806 if (si->flags & SWP_SOLIDSTATE)
807 scan_base = this_cpu_read(*si->cluster_next_cpu);
808 else
809 scan_base = si->cluster_next;
810 offset = scan_base;
811
812
813 if (si->cluster_info) {
814 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
815 goto scan;
816 } else if (unlikely(!si->cluster_nr--)) {
817 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
818 si->cluster_nr = SWAPFILE_CLUSTER - 1;
819 goto checks;
820 }
821
822 spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for a new cluster
		 * from the start of the partition, to minimize the span of
		 * allocated swap.  The cheap-seek (SSD) case is handled by
		 * scan_swap_map_try_ssd_cluster() above.
		 */
830 scan_base = offset = si->lowest_bit;
831 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
832
833
834 for (; last_in_cluster <= si->highest_bit; offset++) {
835 if (si->swap_map[offset])
836 last_in_cluster = offset + SWAPFILE_CLUSTER;
837 else if (offset == last_in_cluster) {
838 spin_lock(&si->lock);
839 offset -= SWAPFILE_CLUSTER - 1;
840 si->cluster_next = offset;
841 si->cluster_nr = SWAPFILE_CLUSTER - 1;
842 goto checks;
843 }
844 if (unlikely(--latency_ration < 0)) {
845 cond_resched();
846 latency_ration = LATENCY_LIMIT;
847 }
848 }
849
850 offset = scan_base;
851 spin_lock(&si->lock);
852 si->cluster_nr = SWAPFILE_CLUSTER - 1;
853 }
854
855checks:
856 if (si->cluster_info) {
857 while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
858
859 if (n_ret)
860 goto done;
861 if (!scan_swap_map_try_ssd_cluster(si, &offset,
862 &scan_base))
863 goto scan;
864 }
865 }
866 if (!(si->flags & SWP_WRITEOK))
867 goto no_page;
868 if (!si->highest_bit)
869 goto no_page;
870 if (offset > si->highest_bit)
871 scan_base = offset = si->lowest_bit;
872
873 ci = lock_cluster(si, offset);
874
875 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
876 int swap_was_freed;
877 unlock_cluster(ci);
878 spin_unlock(&si->lock);
879 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
880 spin_lock(&si->lock);
881
882 if (swap_was_freed)
883 goto checks;
884 goto scan;
885 }
886
887 if (si->swap_map[offset]) {
888 unlock_cluster(ci);
889 if (!n_ret)
890 goto scan;
891 else
892 goto done;
893 }
894 WRITE_ONCE(si->swap_map[offset], usage);
895 inc_cluster_info_page(si, si->cluster_info, offset);
896 unlock_cluster(ci);
897
898 swap_range_alloc(si, offset, 1);
899 slots[n_ret++] = swp_entry(si->type, offset);
900
901
902 if ((n_ret == nr) || (offset >= si->highest_bit))
903 goto done;
904
905
906
907
908 if (unlikely(--latency_ration < 0)) {
909 if (n_ret)
910 goto done;
911 spin_unlock(&si->lock);
912 cond_resched();
913 spin_lock(&si->lock);
914 latency_ration = LATENCY_LIMIT;
915 }
916
917
918 if (si->cluster_info) {
919 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
920 goto checks;
921 } else if (si->cluster_nr && !si->swap_map[++offset]) {
922
923 --si->cluster_nr;
924 goto checks;
925 }

	/*
	 * Even if there's no free clusters available (fragmented),
	 * try to scan a little more quickly with lock held unless we
	 * have scanned too many slots already.
	 */
932 if (!scanned_many) {
933 unsigned long scan_limit;
934
935 if (offset < scan_base)
936 scan_limit = scan_base;
937 else
938 scan_limit = si->highest_bit;
939 for (; offset <= scan_limit && --latency_ration > 0;
940 offset++) {
941 if (!si->swap_map[offset])
942 goto checks;
943 }
944 }
945
946done:
947 set_cluster_next(si, offset + 1);
948 si->flags -= SWP_SCANNING;
949 return n_ret;
950
951scan:
952 spin_unlock(&si->lock);
953 while (++offset <= READ_ONCE(si->highest_bit)) {
954 if (data_race(!si->swap_map[offset])) {
955 spin_lock(&si->lock);
956 goto checks;
957 }
958 if (vm_swap_full() &&
959 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
960 spin_lock(&si->lock);
961 goto checks;
962 }
963 if (unlikely(--latency_ration < 0)) {
964 cond_resched();
965 latency_ration = LATENCY_LIMIT;
966 scanned_many = true;
967 }
968 }
969 offset = si->lowest_bit;
970 while (offset < scan_base) {
971 if (data_race(!si->swap_map[offset])) {
972 spin_lock(&si->lock);
973 goto checks;
974 }
975 if (vm_swap_full() &&
976 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
977 spin_lock(&si->lock);
978 goto checks;
979 }
980 if (unlikely(--latency_ration < 0)) {
981 cond_resched();
982 latency_ration = LATENCY_LIMIT;
983 scanned_many = true;
984 }
985 offset++;
986 }
987 spin_lock(&si->lock);
988
989no_page:
990 si->flags -= SWP_SCANNING;
991 return n_ret;
992}
993
994static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
995{
996 unsigned long idx;
997 struct swap_cluster_info *ci;
998 unsigned long offset;
999

	/*
	 * Should not even be attempting cluster allocations when huge
	 * page swap is disabled.  Warn and fail the allocation.
	 */
1004 if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1005 VM_WARN_ON_ONCE(1);
1006 return 0;
1007 }
1008
1009 if (cluster_list_empty(&si->free_clusters))
1010 return 0;
1011
1012 idx = cluster_list_first(&si->free_clusters);
1013 offset = idx * SWAPFILE_CLUSTER;
1014 ci = lock_cluster(si, offset);
1015 alloc_cluster(si, idx);
1016 cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1017
1018 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1019 unlock_cluster(ci);
1020 swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1021 *slot = swp_entry(si->type, offset);
1022
1023 return 1;
1024}
1025
1026static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1027{
1028 unsigned long offset = idx * SWAPFILE_CLUSTER;
1029 struct swap_cluster_info *ci;
1030
1031 ci = lock_cluster(si, offset);
1032 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1033 cluster_set_count_flag(ci, 0, 0);
1034 free_cluster(si, idx);
1035 unlock_cluster(ci);
1036 swap_range_free(si, offset, SWAPFILE_CLUSTER);
1037}
1038
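/*
 * Allocate up to @n_goal swap entries of @entry_size pages each, trying the
 * available swap devices in priority order for this node.  Returns the
 * number of entries stored in @swp_entries[].
 */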
1039int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1040{
1041 unsigned long size = swap_entry_size(entry_size);
1042 struct swap_info_struct *si, *next;
1043 long avail_pgs;
1044 int n_ret = 0;
1045 int node;

	/* Only single cluster request supported */
1048 WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1049
1050 spin_lock(&swap_avail_lock);
1051
1052 avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1053 if (avail_pgs <= 0) {
1054 spin_unlock(&swap_avail_lock);
1055 goto noswap;
1056 }
1057
1058 n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1059
1060 atomic_long_sub(n_goal * size, &nr_swap_pages);
1061
1062start_over:
1063 node = numa_node_id();
1064 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1065
1066 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1067 spin_unlock(&swap_avail_lock);
1068 spin_lock(&si->lock);
1069 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1070 spin_lock(&swap_avail_lock);
1071 if (plist_node_empty(&si->avail_lists[node])) {
1072 spin_unlock(&si->lock);
1073 goto nextsi;
1074 }
1075 WARN(!si->highest_bit,
1076 "swap_info %d in list but !highest_bit\n",
1077 si->type);
1078 WARN(!(si->flags & SWP_WRITEOK),
1079 "swap_info %d in list but !SWP_WRITEOK\n",
1080 si->type);
1081 __del_from_avail_list(si);
1082 spin_unlock(&si->lock);
1083 goto nextsi;
1084 }
1085 if (size == SWAPFILE_CLUSTER) {
1086 if (si->flags & SWP_BLKDEV)
1087 n_ret = swap_alloc_cluster(si, swp_entries);
1088 } else
1089 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1090 n_goal, swp_entries);
1091 spin_unlock(&si->lock);
1092 if (n_ret || size == SWAPFILE_CLUSTER)
1093 goto check_out;
1094 pr_debug("scan_swap_map of si %d failed to find offset\n",
1095 si->type);
1096
1097 spin_lock(&swap_avail_lock);
1098nextsi:
		/*
		 * If we got here, it's likely that si was almost full before,
		 * and since scan_swap_map_slots() can drop the si->lock,
		 * multiple callers probably all tried to get a page from the
		 * same si and it filled up before we could get one; or, the si
		 * filled up between us dropping swap_avail_lock and taking
		 * si->lock.  Since we dropped the swap_avail_lock, the
		 * swap_avail_head list may have been modified; so if next is
		 * still in the swap_avail_head list then try it, otherwise
		 * start over if we have not gotten any slots.
		 */
1110 if (plist_node_empty(&next->avail_lists[node]))
1111 goto start_over;
1112 }
1113
1114 spin_unlock(&swap_avail_lock);
1115
1116check_out:
1117 if (n_ret < n_goal)
1118 atomic_long_add((long)(n_goal - n_ret) * size,
1119 &nr_swap_pages);
1120noswap:
1121 return n_ret;
1122}
1123
1124static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1125{
1126 struct swap_info_struct *p;
1127 unsigned long offset;
1128
1129 if (!entry.val)
1130 goto out;
1131 p = swp_swap_info(entry);
1132 if (!p)
1133 goto bad_nofile;
1134 if (data_race(!(p->flags & SWP_USED)))
1135 goto bad_device;
1136 offset = swp_offset(entry);
1137 if (offset >= p->max)
1138 goto bad_offset;
1139 return p;
1140
1141bad_offset:
1142 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1143 goto out;
1144bad_device:
1145 pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1146 goto out;
1147bad_nofile:
1148 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1149out:
1150 return NULL;
1151}
1152
1153static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1154{
1155 struct swap_info_struct *p;
1156
1157 p = __swap_info_get(entry);
1158 if (!p)
1159 goto out;
1160 if (data_race(!p->swap_map[swp_offset(entry)]))
1161 goto bad_free;
1162 return p;
1163
1164bad_free:
1165 pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1166out:
1167 return NULL;
1168}
1169
1170static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1171{
1172 struct swap_info_struct *p;
1173
1174 p = _swap_info_get(entry);
1175 if (p)
1176 spin_lock(&p->lock);
1177 return p;
1178}
1179
1180static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1181 struct swap_info_struct *q)
1182{
1183 struct swap_info_struct *p;
1184
1185 p = _swap_info_get(entry);
1186
1187 if (p != q) {
1188 if (q != NULL)
1189 spin_unlock(&q->lock);
1190 if (p != NULL)
1191 spin_lock(&p->lock);
1192 }
1193 return p;
1194}
1195
1196static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1197 unsigned long offset,
1198 unsigned char usage)
1199{
1200 unsigned char count;
1201 unsigned char has_cache;
1202
1203 count = p->swap_map[offset];
1204
1205 has_cache = count & SWAP_HAS_CACHE;
1206 count &= ~SWAP_HAS_CACHE;
1207
1208 if (usage == SWAP_HAS_CACHE) {
1209 VM_BUG_ON(!has_cache);
1210 has_cache = 0;
1211 } else if (count == SWAP_MAP_SHMEM) {
1212
1213
1214
1215
1216 count = 0;
1217 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1218 if (count == COUNT_CONTINUED) {
1219 if (swap_count_continued(p, offset, count))
1220 count = SWAP_MAP_MAX | COUNT_CONTINUED;
1221 else
1222 count = SWAP_MAP_MAX;
1223 } else
1224 count--;
1225 }
1226
1227 usage = count | has_cache;
1228 if (usage)
1229 WRITE_ONCE(p->swap_map[offset], usage);
1230 else
1231 WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1232
1233 return usage;
1234}

/*
 * Check whether swap entry is valid in the swap device.  If so,
 * return pointer to swap_info_struct, and keep the swap entry valid
 * via preventing the swap device from being swapoff, until
 * put_swap_device() is called.  Otherwise return NULL.
 *
 * Notice that swapoff or swapoff+swapon can still happen before the
 * percpu_ref_tryget_live() in get_swap_device() or after the
 * percpu_ref_put() in put_swap_device() if there isn't any other way
 * to prevent swapoff, such as page lock, page table lock, etc.  The
 * caller must be prepared for that.  For example, the following
 * situation is possible.
 *
 *   CPU1				CPU2
 *   do_swap_page()
 *     ...				swapoff+swapon
 *     __read_swap_cache_async()
 *       swapcache_prepare()
 *         __swap_duplicate()
 *           // check swap_map
 *         // verify PTE not changed
 *
 * In __swap_duplicate(), the swap_map need to be checked before
 * changing partly because the specified swap entry may be for another
 * swap device which has been swapoff.  And in do_swap_page(), after
 * the page is read from the swap device, the PTE is verified not
 * changed with the page table locked to check whether the swap device
 * has been swapoff or swapoff+swapon.
 */
1265struct swap_info_struct *get_swap_device(swp_entry_t entry)
1266{
1267 struct swap_info_struct *si;
1268 unsigned long offset;
1269
1270 if (!entry.val)
1271 goto out;
1272 si = swp_swap_info(entry);
1273 if (!si)
1274 goto bad_nofile;
1275 if (!percpu_ref_tryget_live(&si->users))
1276 goto out;

	/*
	 * Guarantee the si->users are checked before accessing other
	 * fields of swap_info_struct.
	 *
	 * Paired with the spin_unlock() after setup_swap_info() in
	 * enable_swap_info().
	 */
1284 smp_rmb();
1285 offset = swp_offset(entry);
1286 if (offset >= si->max)
1287 goto put_out;
1288
1289 return si;
1290bad_nofile:
1291 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1292out:
1293 return NULL;
1294put_out:
1295 percpu_ref_put(&si->users);
1296 return NULL;
1297}
1298
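/*
 * Drop one reference to a swap entry; once no usage remains, hand the entry
 * to the swap slot cache for freeing.
 */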
1299static unsigned char __swap_entry_free(struct swap_info_struct *p,
1300 swp_entry_t entry)
1301{
1302 struct swap_cluster_info *ci;
1303 unsigned long offset = swp_offset(entry);
1304 unsigned char usage;
1305
1306 ci = lock_cluster_or_swap_info(p, offset);
1307 usage = __swap_entry_free_locked(p, offset, 1);
1308 unlock_cluster_or_swap_info(p, ci);
1309 if (!usage)
1310 free_swap_slot(entry);
1311
1312 return usage;
1313}
1314
1315static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1316{
1317 struct swap_cluster_info *ci;
1318 unsigned long offset = swp_offset(entry);
1319 unsigned char count;
1320
1321 ci = lock_cluster(p, offset);
1322 count = p->swap_map[offset];
1323 VM_BUG_ON(count != SWAP_HAS_CACHE);
1324 p->swap_map[offset] = 0;
1325 dec_cluster_info_page(p, p->cluster_info, offset);
1326 unlock_cluster(ci);
1327
1328 mem_cgroup_uncharge_swap(entry, 1);
1329 swap_range_free(p, offset, 1);
1330}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
1336void swap_free(swp_entry_t entry)
1337{
1338 struct swap_info_struct *p;
1339
1340 p = _swap_info_get(entry);
1341 if (p)
1342 __swap_entry_free(p, entry);
1343}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
1348void put_swap_page(struct page *page, swp_entry_t entry)
1349{
1350 unsigned long offset = swp_offset(entry);
1351 unsigned long idx = offset / SWAPFILE_CLUSTER;
1352 struct swap_cluster_info *ci;
1353 struct swap_info_struct *si;
1354 unsigned char *map;
1355 unsigned int i, free_entries = 0;
1356 unsigned char val;
1357 int size = swap_entry_size(thp_nr_pages(page));
1358
1359 si = _swap_info_get(entry);
1360 if (!si)
1361 return;
1362
1363 ci = lock_cluster_or_swap_info(si, offset);
1364 if (size == SWAPFILE_CLUSTER) {
1365 VM_BUG_ON(!cluster_is_huge(ci));
1366 map = si->swap_map + offset;
1367 for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1368 val = map[i];
1369 VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1370 if (val == SWAP_HAS_CACHE)
1371 free_entries++;
1372 }
1373 cluster_clear_huge(ci);
1374 if (free_entries == SWAPFILE_CLUSTER) {
1375 unlock_cluster_or_swap_info(si, ci);
1376 spin_lock(&si->lock);
1377 mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1378 swap_free_cluster(si, idx);
1379 spin_unlock(&si->lock);
1380 return;
1381 }
1382 }
1383 for (i = 0; i < size; i++, entry.val++) {
1384 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1385 unlock_cluster_or_swap_info(si, ci);
1386 free_swap_slot(entry);
1387 if (i == size - 1)
1388 return;
1389 lock_cluster_or_swap_info(si, offset);
1390 }
1391 }
1392 unlock_cluster_or_swap_info(si, ci);
1393}
1394
1395#ifdef CONFIG_THP_SWAP
1396int split_swap_cluster(swp_entry_t entry)
1397{
1398 struct swap_info_struct *si;
1399 struct swap_cluster_info *ci;
1400 unsigned long offset = swp_offset(entry);
1401
1402 si = _swap_info_get(entry);
1403 if (!si)
1404 return -EBUSY;
1405 ci = lock_cluster(si, offset);
1406 cluster_clear_huge(ci);
1407 unlock_cluster(ci);
1408 return 0;
1409}
1410#endif
1411
1412static int swp_entry_cmp(const void *ent1, const void *ent2)
1413{
1414 const swp_entry_t *e1 = ent1, *e2 = ent2;
1415
1416 return (int)swp_type(*e1) - (int)swp_type(*e2);
1417}
1418
1419void swapcache_free_entries(swp_entry_t *entries, int n)
1420{
1421 struct swap_info_struct *p, *prev;
1422 int i;
1423
1424 if (n <= 0)
1425 return;
1426
1427 prev = NULL;
1428 p = NULL;

	/*
	 * Sort swap entries by swap device, so each lock is only taken once.
	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
	 * so low that it isn't necessary to optimize further.
	 */
1435 if (nr_swapfiles > 1)
1436 sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1437 for (i = 0; i < n; ++i) {
1438 p = swap_info_get_cont(entries[i], prev);
1439 if (p)
1440 swap_entry_free(p, entries[i]);
1441 prev = p;
1442 }
1443 if (p)
1444 spin_unlock(&p->lock);
1445}

/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
1452int page_swapcount(struct page *page)
1453{
1454 int count = 0;
1455 struct swap_info_struct *p;
1456 struct swap_cluster_info *ci;
1457 swp_entry_t entry;
1458 unsigned long offset;
1459
1460 entry.val = page_private(page);
1461 p = _swap_info_get(entry);
1462 if (p) {
1463 offset = swp_offset(entry);
1464 ci = lock_cluster_or_swap_info(p, offset);
1465 count = swap_count(p->swap_map[offset]);
1466 unlock_cluster_or_swap_info(p, ci);
1467 }
1468 return count;
1469}
1470
1471int __swap_count(swp_entry_t entry)
1472{
1473 struct swap_info_struct *si;
1474 pgoff_t offset = swp_offset(entry);
1475 int count = 0;
1476
1477 si = get_swap_device(entry);
1478 if (si) {
1479 count = swap_count(si->swap_map[offset]);
1480 put_swap_device(si);
1481 }
1482 return count;
1483}
1484
1485static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1486{
1487 int count = 0;
1488 pgoff_t offset = swp_offset(entry);
1489 struct swap_cluster_info *ci;
1490
1491 ci = lock_cluster_or_swap_info(si, offset);
1492 count = swap_count(si->swap_map[offset]);
1493 unlock_cluster_or_swap_info(si, ci);
1494 return count;
1495}

/*
 * How many references to @entry are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
1502int __swp_swapcount(swp_entry_t entry)
1503{
1504 int count = 0;
1505 struct swap_info_struct *si;
1506
1507 si = get_swap_device(entry);
1508 if (si) {
1509 count = swap_swapcount(si, entry);
1510 put_swap_device(si);
1511 }
1512 return count;
1513}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns exact answer.
 */
1519int swp_swapcount(swp_entry_t entry)
1520{
1521 int count, tmp_count, n;
1522 struct swap_info_struct *p;
1523 struct swap_cluster_info *ci;
1524 struct page *page;
1525 pgoff_t offset;
1526 unsigned char *map;
1527
1528 p = _swap_info_get(entry);
1529 if (!p)
1530 return 0;
1531
1532 offset = swp_offset(entry);
1533
1534 ci = lock_cluster_or_swap_info(p, offset);
1535
1536 count = swap_count(p->swap_map[offset]);
1537 if (!(count & COUNT_CONTINUED))
1538 goto out;
1539
1540 count &= ~COUNT_CONTINUED;
1541 n = SWAP_MAP_MAX + 1;
1542
1543 page = vmalloc_to_page(p->swap_map + offset);
1544 offset &= ~PAGE_MASK;
1545 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1546
1547 do {
1548 page = list_next_entry(page, lru);
1549 map = kmap_atomic(page);
1550 tmp_count = map[offset];
1551 kunmap_atomic(map);
1552
1553 count += (tmp_count & ~COUNT_CONTINUED) * n;
1554 n *= (SWAP_CONT_MAX + 1);
1555 } while (tmp_count & COUNT_CONTINUED);
1556out:
1557 unlock_cluster_or_swap_info(p, ci);
1558 return count;
1559}
1560
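/*
 * Return true if any swap entry backing this (possibly huge) page still has
 * a non-zero usage count.
 */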
1561static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1562 swp_entry_t entry)
1563{
1564 struct swap_cluster_info *ci;
1565 unsigned char *map = si->swap_map;
1566 unsigned long roffset = swp_offset(entry);
1567 unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1568 int i;
1569 bool ret = false;
1570
1571 ci = lock_cluster_or_swap_info(si, offset);
1572 if (!ci || !cluster_is_huge(ci)) {
1573 if (swap_count(map[roffset]))
1574 ret = true;
1575 goto unlock_out;
1576 }
1577 for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1578 if (swap_count(map[offset + i])) {
1579 ret = true;
1580 break;
1581 }
1582 }
1583unlock_out:
1584 unlock_cluster_or_swap_info(si, ci);
1585 return ret;
1586}
1587
1588static bool page_swapped(struct page *page)
1589{
1590 swp_entry_t entry;
1591 struct swap_info_struct *si;
1592
1593 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1594 return page_swapcount(page) != 0;
1595
1596 page = compound_head(page);
1597 entry.val = page_private(page);
1598 si = _swap_info_get(entry);
1599 if (si)
1600 return swap_page_trans_huge_swapped(si, entry);
1601 return false;
1602}
1603
1604static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1605 int *total_swapcount)
1606{
1607 int i, map_swapcount, _total_mapcount, _total_swapcount;
1608 unsigned long offset = 0;
1609 struct swap_info_struct *si;
1610 struct swap_cluster_info *ci = NULL;
1611 unsigned char *map = NULL;
1612 int mapcount, swapcount = 0;
1613
1614
1615 VM_BUG_ON_PAGE(PageHuge(page), page);
1616
1617 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1618 mapcount = page_trans_huge_mapcount(page, total_mapcount);
1619 if (PageSwapCache(page))
1620 swapcount = page_swapcount(page);
1621 if (total_swapcount)
1622 *total_swapcount = swapcount;
1623 return mapcount + swapcount;
1624 }
1625
1626 page = compound_head(page);
1627
1628 _total_mapcount = _total_swapcount = map_swapcount = 0;
1629 if (PageSwapCache(page)) {
1630 swp_entry_t entry;
1631
1632 entry.val = page_private(page);
1633 si = _swap_info_get(entry);
1634 if (si) {
1635 map = si->swap_map;
1636 offset = swp_offset(entry);
1637 }
1638 }
1639 if (map)
1640 ci = lock_cluster(si, offset);
1641 for (i = 0; i < HPAGE_PMD_NR; i++) {
1642 mapcount = atomic_read(&page[i]._mapcount) + 1;
1643 _total_mapcount += mapcount;
1644 if (map) {
1645 swapcount = swap_count(map[offset + i]);
1646 _total_swapcount += swapcount;
1647 }
1648 map_swapcount = max(map_swapcount, mapcount + swapcount);
1649 }
1650 unlock_cluster(ci);
1651 if (PageDoubleMap(page)) {
1652 map_swapcount -= 1;
1653 _total_mapcount -= HPAGE_PMD_NR;
1654 }
1655 mapcount = compound_mapcount(page);
1656 map_swapcount += mapcount;
1657 _total_mapcount += mapcount;
1658 if (total_mapcount)
1659 *total_mapcount = _total_mapcount;
1660 if (total_swapcount)
1661 *total_swapcount = _total_swapcount;
1662
1663 return map_swapcount;
1664}

/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 *
 * NOTE: total_map_swapcount should not be relied upon by the caller if
 * reuse_swap_page() returns false, but it may be always overwritten
 * (see the other implementation for CONFIG_SWAP=n).
 */
1676bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1677{
1678 int count, total_mapcount, total_swapcount;
1679
1680 VM_BUG_ON_PAGE(!PageLocked(page), page);
1681 if (unlikely(PageKsm(page)))
1682 return false;
1683 count = page_trans_huge_map_swapcount(page, &total_mapcount,
1684 &total_swapcount);
1685 if (total_map_swapcount)
1686 *total_map_swapcount = total_mapcount + total_swapcount;
1687 if (count == 1 && PageSwapCache(page) &&
1688 (likely(!PageTransCompound(page)) ||
1689
1690 total_swapcount == page_swapcount(page))) {
1691 if (!PageWriteback(page)) {
1692 page = compound_head(page);
1693 delete_from_swap_cache(page);
1694 SetPageDirty(page);
1695 } else {
1696 swp_entry_t entry;
1697 struct swap_info_struct *p;
1698
1699 entry.val = page_private(page);
1700 p = swap_info_get(entry);
1701 if (p->flags & SWP_STABLE_WRITES) {
1702 spin_unlock(&p->lock);
1703 return false;
1704 }
1705 spin_unlock(&p->lock);
1706 }
1707 }
1708
1709 return count <= 1;
1710}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
1716int try_to_free_swap(struct page *page)
1717{
1718 VM_BUG_ON_PAGE(!PageLocked(page), page);
1719
1720 if (!PageSwapCache(page))
1721 return 0;
1722 if (PageWriteback(page))
1723 return 0;
1724 if (page_swapped(page))
1725 return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * a call to try_to_free_swap() here (most probably from
	 * __try_to_reclaim_swap() while hibernation is allocating its
	 * own swap pages for the image, but conceivably even from
	 * ordinary memory reclaim) could free swap from a page which
	 * has already been recorded in the image as a clean swapcache
	 * page, and then reuse that swap for another page of the image.
	 * On waking from hibernation, the original page might be freed
	 * under memory pressure, then later read back in from swap,
	 * now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
1742 if (pm_suspended_storage())
1743 return 0;
1744
1745 page = compound_head(page);
1746 delete_from_swap_cache(page);
1747 SetPageDirty(page);
1748 return 1;
1749}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
1755int free_swap_and_cache(swp_entry_t entry)
1756{
1757 struct swap_info_struct *p;
1758 unsigned char count;
1759
1760 if (non_swap_entry(entry))
1761 return 1;
1762
1763 p = _swap_info_get(entry);
1764 if (p) {
1765 count = __swap_entry_free(p, entry);
1766 if (count == SWAP_HAS_CACHE &&
1767 !swap_page_trans_huge_swapped(p, entry))
1768 __try_to_reclaim_swap(p, swp_offset(entry),
1769 TTRS_UNMAPPED | TTRS_FULL);
1770 }
1771 return p != NULL;
1772}
1773
1774#ifdef CONFIG_HIBERNATION
1775
1776swp_entry_t get_swap_page_of_type(int type)
1777{
1778 struct swap_info_struct *si = swap_type_to_swap_info(type);
1779 swp_entry_t entry = {0};
1780
1781 if (!si)
1782 goto fail;
1783
1784
1785 spin_lock(&si->lock);
1786 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1787 atomic_long_dec(&nr_swap_pages);
1788 spin_unlock(&si->lock);
1789fail:
1790 return entry;
1791}

/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
1801int swap_type_of(dev_t device, sector_t offset)
1802{
1803 int type;
1804
1805 if (!device)
1806 return -1;
1807
1808 spin_lock(&swap_lock);
1809 for (type = 0; type < nr_swapfiles; type++) {
1810 struct swap_info_struct *sis = swap_info[type];
1811
1812 if (!(sis->flags & SWP_WRITEOK))
1813 continue;
1814
1815 if (device == sis->bdev->bd_dev) {
1816 struct swap_extent *se = first_se(sis);
1817
1818 if (se->start_block == offset) {
1819 spin_unlock(&swap_lock);
1820 return type;
1821 }
1822 }
1823 }
1824 spin_unlock(&swap_lock);
1825 return -ENODEV;
1826}
1827
1828int find_first_swap(dev_t *device)
1829{
1830 int type;
1831
1832 spin_lock(&swap_lock);
1833 for (type = 0; type < nr_swapfiles; type++) {
1834 struct swap_info_struct *sis = swap_info[type];
1835
1836 if (!(sis->flags & SWP_WRITEOK))
1837 continue;
1838 *device = sis->bdev->bd_dev;
1839 spin_unlock(&swap_lock);
1840 return type;
1841 }
1842 spin_unlock(&swap_lock);
1843 return -ENODEV;
1844}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
1850sector_t swapdev_block(int type, pgoff_t offset)
1851{
1852 struct swap_info_struct *si = swap_type_to_swap_info(type);
1853 struct swap_extent *se;
1854
1855 if (!si || !(si->flags & SWP_WRITEOK))
1856 return 0;
1857 se = offset_to_swap_extent(si, offset);
1858 return se->start_block + (offset - se->start_page);
1859}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
1867unsigned int count_swap_pages(int type, int free)
1868{
1869 unsigned int n = 0;
1870
1871 spin_lock(&swap_lock);
1872 if ((unsigned int)type < nr_swapfiles) {
1873 struct swap_info_struct *sis = swap_info[type];
1874
1875 spin_lock(&sis->lock);
1876 if (sis->flags & SWP_WRITEOK) {
1877 n = sis->pages;
1878 if (free)
1879 n -= sis->inuse_pages;
1880 }
1881 spin_unlock(&sis->lock);
1882 }
1883 spin_unlock(&swap_lock);
1884 return n;
1885}
1886#endif
1887
1888static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1889{
1890 return pte_same(pte_swp_clear_flags(pte), swp_pte);
1891}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
1898static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1899 unsigned long addr, swp_entry_t entry, struct page *page)
1900{
1901 struct page *swapcache;
1902 spinlock_t *ptl;
1903 pte_t *pte;
1904 int ret = 1;
1905
1906 swapcache = page;
1907 page = ksm_might_need_to_copy(page, vma, addr);
1908 if (unlikely(!page))
1909 return -ENOMEM;
1910
1911 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1912 if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1913 ret = 0;
1914 goto out;
1915 }
1916
1917 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1918 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1919 get_page(page);
1920 set_pte_at(vma->vm_mm, addr, pte,
1921 pte_mkold(mk_pte(page, vma->vm_page_prot)));
1922 if (page == swapcache) {
1923 page_add_anon_rmap(page, vma, addr, false);
1924 } else {
1925 page_add_new_anon_rmap(page, vma, addr, false);
1926 lru_cache_add_inactive_or_unevictable(page, vma);
1927 }
1928 swap_free(entry);
1929out:
1930 pte_unmap_unlock(pte, ptl);
1931 if (page != swapcache) {
1932 unlock_page(page);
1933 put_page(page);
1934 }
1935 return ret;
1936}
1937
1938static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1939 unsigned long addr, unsigned long end,
1940 unsigned int type, bool frontswap,
1941 unsigned long *fs_pages_to_unuse)
1942{
1943 struct page *page;
1944 swp_entry_t entry;
1945 pte_t *pte;
1946 struct swap_info_struct *si;
1947 unsigned long offset;
1948 int ret = 0;
1949 volatile unsigned char *swap_map;
1950
1951 si = swap_info[type];
1952 pte = pte_offset_map(pmd, addr);
1953 do {
1954 if (!is_swap_pte(*pte))
1955 continue;
1956
1957 entry = pte_to_swp_entry(*pte);
1958 if (swp_type(entry) != type)
1959 continue;
1960
1961 offset = swp_offset(entry);
1962 if (frontswap && !frontswap_test(si, offset))
1963 continue;
1964
1965 pte_unmap(pte);
1966 swap_map = &si->swap_map[offset];
1967 page = lookup_swap_cache(entry, vma, addr);
1968 if (!page) {
1969 struct vm_fault vmf = {
1970 .vma = vma,
1971 .address = addr,
1972 .pmd = pmd,
1973 };
1974
1975 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1976 &vmf);
1977 }
1978 if (!page) {
1979 if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1980 goto try_next;
1981 return -ENOMEM;
1982 }
1983
1984 lock_page(page);
1985 wait_on_page_writeback(page);
1986 ret = unuse_pte(vma, pmd, addr, entry, page);
1987 if (ret < 0) {
1988 unlock_page(page);
1989 put_page(page);
1990 goto out;
1991 }
1992
1993 try_to_free_swap(page);
1994 unlock_page(page);
1995 put_page(page);
1996
1997 if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1998 ret = FRONTSWAP_PAGES_UNUSED;
1999 goto out;
2000 }
2001try_next:
2002 pte = pte_offset_map(pmd, addr);
2003 } while (pte++, addr += PAGE_SIZE, addr != end);
2004 pte_unmap(pte - 1);
2005
2006 ret = 0;
2007out:
2008 return ret;
2009}
2010
2011static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2012 unsigned long addr, unsigned long end,
2013 unsigned int type, bool frontswap,
2014 unsigned long *fs_pages_to_unuse)
2015{
2016 pmd_t *pmd;
2017 unsigned long next;
2018 int ret;
2019
2020 pmd = pmd_offset(pud, addr);
2021 do {
2022 cond_resched();
2023 next = pmd_addr_end(addr, end);
2024 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
2025 continue;
2026 ret = unuse_pte_range(vma, pmd, addr, next, type,
2027 frontswap, fs_pages_to_unuse);
2028 if (ret)
2029 return ret;
2030 } while (pmd++, addr = next, addr != end);
2031 return 0;
2032}
2033
2034static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2035 unsigned long addr, unsigned long end,
2036 unsigned int type, bool frontswap,
2037 unsigned long *fs_pages_to_unuse)
2038{
2039 pud_t *pud;
2040 unsigned long next;
2041 int ret;
2042
2043 pud = pud_offset(p4d, addr);
2044 do {
2045 next = pud_addr_end(addr, end);
2046 if (pud_none_or_clear_bad(pud))
2047 continue;
2048 ret = unuse_pmd_range(vma, pud, addr, next, type,
2049 frontswap, fs_pages_to_unuse);
2050 if (ret)
2051 return ret;
2052 } while (pud++, addr = next, addr != end);
2053 return 0;
2054}
2055
2056static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2057 unsigned long addr, unsigned long end,
2058 unsigned int type, bool frontswap,
2059 unsigned long *fs_pages_to_unuse)
2060{
2061 p4d_t *p4d;
2062 unsigned long next;
2063 int ret;
2064
2065 p4d = p4d_offset(pgd, addr);
2066 do {
2067 next = p4d_addr_end(addr, end);
2068 if (p4d_none_or_clear_bad(p4d))
2069 continue;
2070 ret = unuse_pud_range(vma, p4d, addr, next, type,
2071 frontswap, fs_pages_to_unuse);
2072 if (ret)
2073 return ret;
2074 } while (p4d++, addr = next, addr != end);
2075 return 0;
2076}
2077
2078static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2079 bool frontswap, unsigned long *fs_pages_to_unuse)
2080{
2081 pgd_t *pgd;
2082 unsigned long addr, end, next;
2083 int ret;
2084
2085 addr = vma->vm_start;
2086 end = vma->vm_end;
2087
2088 pgd = pgd_offset(vma->vm_mm, addr);
2089 do {
2090 next = pgd_addr_end(addr, end);
2091 if (pgd_none_or_clear_bad(pgd))
2092 continue;
2093 ret = unuse_p4d_range(vma, pgd, addr, next, type,
2094 frontswap, fs_pages_to_unuse);
2095 if (ret)
2096 return ret;
2097 } while (pgd++, addr = next, addr != end);
2098 return 0;
2099}
2100
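/*
 * Walk all anonymous VMAs of @mm and replace swap entries of @type with the
 * corresponding pages, reading them back from swap as needed.
 */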
2101static int unuse_mm(struct mm_struct *mm, unsigned int type,
2102 bool frontswap, unsigned long *fs_pages_to_unuse)
2103{
2104 struct vm_area_struct *vma;
2105 int ret = 0;
2106
2107 mmap_read_lock(mm);
2108 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2109 if (vma->anon_vma) {
2110 ret = unuse_vma(vma, type, frontswap,
2111 fs_pages_to_unuse);
2112 if (ret)
2113 break;
2114 }
2115 cond_resched();
2116 }
2117 mmap_read_unlock(mm);
2118 return ret;
2119}

/*
 * Scan swap_map (or the frontswap_map if the frontswap parameter is true)
 * from the current position to the next entry still in use.  Return 0
 * if there are no inuse entries after prev till end of the map.
 */
2126static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2127 unsigned int prev, bool frontswap)
2128{
2129 unsigned int i;
2130 unsigned char count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
2138 for (i = prev + 1; i < si->max; i++) {
2139 count = READ_ONCE(si->swap_map[i]);
2140 if (count && swap_count(count) != SWAP_MAP_BAD)
2141 if (!frontswap || frontswap_test(si, i))
2142 break;
2143 if ((i % LATENCY_LIMIT) == 0)
2144 cond_resched();
2145 }
2146
2147 if (i == si->max)
2148 i = 0;
2149
2150 return i;
2151}

/*
 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
 * pages_to_unuse==0 means all pages; ignored if frontswap is false
 */
2157int try_to_unuse(unsigned int type, bool frontswap,
2158 unsigned long pages_to_unuse)
2159{
2160 struct mm_struct *prev_mm;
2161 struct mm_struct *mm;
2162 struct list_head *p;
2163 int retval = 0;
2164 struct swap_info_struct *si = swap_info[type];
2165 struct page *page;
2166 swp_entry_t entry;
2167 unsigned int i;
2168
2169 if (!READ_ONCE(si->inuse_pages))
2170 return 0;
2171
2172 if (!frontswap)
2173 pages_to_unuse = 0;
2174
2175retry:
2176 retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2177 if (retval)
2178 goto out;
2179
2180 prev_mm = &init_mm;
2181 mmget(prev_mm);
2182
2183 spin_lock(&mmlist_lock);
2184 p = &init_mm.mmlist;
2185 while (READ_ONCE(si->inuse_pages) &&
2186 !signal_pending(current) &&
2187 (p = p->next) != &init_mm.mmlist) {
2188
2189 mm = list_entry(p, struct mm_struct, mmlist);
2190 if (!mmget_not_zero(mm))
2191 continue;
2192 spin_unlock(&mmlist_lock);
2193 mmput(prev_mm);
2194 prev_mm = mm;
2195 retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2196
2197 if (retval) {
2198 mmput(prev_mm);
2199 goto out;
2200 }
2201
2202
2203
2204
2205
2206 cond_resched();
2207 spin_lock(&mmlist_lock);
2208 }
2209 spin_unlock(&mmlist_lock);
2210
2211 mmput(prev_mm);
2212
2213 i = 0;
2214 while (READ_ONCE(si->inuse_pages) &&
2215 !signal_pending(current) &&
2216 (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2217
2218 entry = swp_entry(type, i);
2219 page = find_get_page(swap_address_space(entry), i);
2220 if (!page)
2221 continue;

		/*
		 * It is conceivable that a racing task removed this page from
		 * swap cache just before we acquired the page lock.  The page
		 * might even be back in swap cache on another swap area.  But
		 * that is okay, try_to_free_swap() only removes stale pages.
		 */
2229 lock_page(page);
2230 wait_on_page_writeback(page);
2231 try_to_free_swap(page);
2232 unlock_page(page);
2233 put_page(page);
2234
2235
2236
2237
2238
2239
2240 if (pages_to_unuse && --pages_to_unuse == 0)
2241 goto out;
2242 }

	/*
	 * Check again whether there are still swap entries in the map: under
	 * global memory pressure, swap entries can be reinserted back into
	 * process space after the mmlist loop above passes over them, in
	 * which case the unuse logic has to be retried.
	 *
	 * Limit the number of retries?  No: when mmget_not_zero() above
	 * fails, that mm is likely to be freeing swap from exit_mmap(),
	 * which proceeds at its own independent pace, and swap can also be
	 * temporarily hidden between allocation and insertion elsewhere.
	 * It's easy and robust (though cpu-intensive) just to keep retrying.
	 */
2256 if (READ_ONCE(si->inuse_pages)) {
2257 if (!signal_pending(current))
2258 goto retry;
2259 retval = -EINTR;
2260 }
2261out:
2262 return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2263}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist.  swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock.
 */
2271static void drain_mmlist(void)
2272{
2273 struct list_head *p, *next;
2274 unsigned int type;
2275
2276 for (type = 0; type < nr_swapfiles; type++)
2277 if (swap_info[type]->inuse_pages)
2278 return;
2279 spin_lock(&mmlist_lock);
2280 list_for_each_safe(p, next, &init_mm.mmlist)
2281 list_del_init(p);
2282 spin_unlock(&mmlist_lock);
2283}

/*
 * Free all of a swapdev's extent information
 */
2288static void destroy_swap_extents(struct swap_info_struct *sis)
2289{
2290 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2291 struct rb_node *rb = sis->swap_extent_root.rb_node;
2292 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2293
2294 rb_erase(rb, &sis->swap_extent_root);
2295 kfree(se);
2296 }
2297
2298 if (sis->flags & SWP_ACTIVATED) {
2299 struct file *swap_file = sis->swap_file;
2300 struct address_space *mapping = swap_file->f_mapping;
2301
2302 sis->flags &= ~SWP_ACTIVATED;
2303 if (mapping->a_ops->swap_deactivate)
2304 mapping->a_ops->swap_deactivate(swap_file);
2305 }
2306}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent tree.
 *
 * This function rather assumes that it is called in ascending page order.
 */
2314int
2315add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2316 unsigned long nr_pages, sector_t start_block)
2317{
2318 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2319 struct swap_extent *se;
2320 struct swap_extent *new_se;
2321

	/*
	 * place the new node at the right most since the
	 * function is called in ascending page order.
	 */
2326 while (*link) {
2327 parent = *link;
2328 link = &parent->rb_right;
2329 }
2330
2331 if (parent) {
2332 se = rb_entry(parent, struct swap_extent, rb_node);
2333 BUG_ON(se->start_page + se->nr_pages != start_page);
2334 if (se->start_block + se->nr_pages == start_block) {
2335
2336 se->nr_pages += nr_pages;
2337 return 0;
2338 }
2339 }
2340
2341
2342 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2343 if (new_se == NULL)
2344 return -ENOMEM;
2345 new_se->start_page = start_page;
2346 new_se->nr_pages = nr_pages;
2347 new_se->start_block = start_block;
2348
2349 rb_link_node(&new_se->rb_node, parent, link);
2350 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2351 return 1;
2352}
2353EXPORT_SYMBOL_GPL(add_swap_extent);

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks.  A rbtree of swap extents is
 * built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent rbtree operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into a rbtree, in PAGE_SIZE chunks.  If some stray
 * blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
 * prevents users from writing to the swap device, which will corrupt memory.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
 * megabytes of swap space in a single swap extent.
 */
2385static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2386{
2387 struct file *swap_file = sis->swap_file;
2388 struct address_space *mapping = swap_file->f_mapping;
2389 struct inode *inode = mapping->host;
2390 int ret;
2391
2392 if (S_ISBLK(inode->i_mode)) {
2393 ret = add_swap_extent(sis, 0, sis->max, 0);
2394 *span = sis->pages;
2395 return ret;
2396 }
2397
2398 if (mapping->a_ops->swap_activate) {
2399 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2400 if (ret >= 0)
2401 sis->flags |= SWP_ACTIVATED;
2402 if (!ret) {
2403 sis->flags |= SWP_FS_OPS;
2404 ret = add_swap_extent(sis, 0, sis->max, 0);
2405 *span = sis->pages;
2406 }
2407 return ret;
2408 }
2409
2410 return generic_swapfile_activate(sis, swap_file, span);
2411}
2412
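/* NUMA node of the block device backing this swap area, if known. */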
2413static int swap_node(struct swap_info_struct *p)
2414{
2415 struct block_device *bdev;
2416
2417 if (p->bdev)
2418 bdev = p->bdev;
2419 else
2420 bdev = p->swap_file->f_inode->i_sb->s_bdev;
2421
2422 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2423}
2424
2425static void setup_swap_info(struct swap_info_struct *p, int prio,
2426 unsigned char *swap_map,
2427 struct swap_cluster_info *cluster_info)
2428{
2429 int i;
2430
2431 if (prio >= 0)
2432 p->prio = prio;
2433 else
2434 p->prio = --least_priority;

	/*
	 * the plist prio is negated because plist ordering is
	 * low-to-high, while swap ordering is high-to-low
	 */
2439 p->list.prio = -p->prio;
2440 for_each_node(i) {
2441 if (p->prio >= 0)
2442 p->avail_lists[i].prio = -p->prio;
2443 else {
2444 if (swap_node(p) == i)
2445 p->avail_lists[i].prio = 1;
2446 else
2447 p->avail_lists[i].prio = -p->prio;
2448 }
2449 }
2450 p->swap_map = swap_map;
2451 p->cluster_info = cluster_info;
2452}
2453
2454static void _enable_swap_info(struct swap_info_struct *p)
2455{
2456 p->flags |= SWP_WRITEOK;
2457 atomic_long_add(p->pages, &nr_swap_pages);
2458 total_swap_pages += p->pages;
2459
2460 assert_spin_locked(&swap_lock);
	/*
	 * both lists are plists, and thus priority ordered.
	 * swap_active_head needs to be priority ordered for swapoff(),
	 * which on removal of any swap_info_struct with an auto-assigned
	 * (i.e. negative) priority increments the auto-assigned priority
	 * of any lower-priority swap_info_structs.
	 * swap_avail_head needs to be priority ordered for get_swap_pages(),
	 * which allocates swap pages from the highest available priority
	 * swap_info_struct.
	 */
2471 plist_add(&p->list, &swap_active_head);
2472 add_to_avail_list(p);
2473}
2474
2475static void enable_swap_info(struct swap_info_struct *p, int prio,
2476 unsigned char *swap_map,
2477 struct swap_cluster_info *cluster_info,
2478 unsigned long *frontswap_map)
2479{
2480 frontswap_init(p->type, frontswap_map);
2481 spin_lock(&swap_lock);
2482 spin_lock(&p->lock);
2483 setup_swap_info(p, prio, swap_map, cluster_info);
2484 spin_unlock(&p->lock);
2485 spin_unlock(&swap_lock);

	/*
	 * Finished initializing swap device, now it's safe to reference it.
	 */
2489 percpu_ref_resurrect(&p->users);
2490 spin_lock(&swap_lock);
2491 spin_lock(&p->lock);
2492 _enable_swap_info(p);
2493 spin_unlock(&p->lock);
2494 spin_unlock(&swap_lock);
2495}
2496
2497static void reinsert_swap_info(struct swap_info_struct *p)
2498{
2499 spin_lock(&swap_lock);
2500 spin_lock(&p->lock);
2501 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2502 _enable_swap_info(p);
2503 spin_unlock(&p->lock);
2504 spin_unlock(&swap_lock);
2505}
2506
2507bool has_usable_swap(void)
2508{
2509 bool ret = true;
2510
2511 spin_lock(&swap_lock);
2512 if (plist_head_empty(&swap_active_head))
2513 ret = false;
2514 spin_unlock(&swap_lock);
2515 return ret;
2516}
2517
2518SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2519{
2520 struct swap_info_struct *p = NULL;
2521 unsigned char *swap_map;
2522 struct swap_cluster_info *cluster_info;
2523 unsigned long *frontswap_map;
2524 struct file *swap_file, *victim;
2525 struct address_space *mapping;
2526 struct inode *inode;
2527 struct filename *pathname;
2528 int err, found = 0;
2529 unsigned int old_block_size;
2530
2531 if (!capable(CAP_SYS_ADMIN))
2532 return -EPERM;
2533
2534 BUG_ON(!current->mm);
2535
2536 pathname = getname(specialfile);
2537 if (IS_ERR(pathname))
2538 return PTR_ERR(pathname);
2539
2540 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2541 err = PTR_ERR(victim);
2542 if (IS_ERR(victim))
2543 goto out;
2544
2545 mapping = victim->f_mapping;
2546 spin_lock(&swap_lock);
2547 plist_for_each_entry(p, &swap_active_head, list) {
2548 if (p->flags & SWP_WRITEOK) {
2549 if (p->swap_file->f_mapping == mapping) {
2550 found = 1;
2551 break;
2552 }
2553 }
2554 }
2555 if (!found) {
2556 err = -EINVAL;
2557 spin_unlock(&swap_lock);
2558 goto out_dput;
2559 }
2560 if (!security_vm_enough_memory_mm(current->mm, p->pages))
2561 vm_unacct_memory(p->pages);
2562 else {
2563 err = -ENOMEM;
2564 spin_unlock(&swap_lock);
2565 goto out_dput;
2566 }
2567 del_from_avail_list(p);
2568 spin_lock(&p->lock);
2569 if (p->prio < 0) {
2570 struct swap_info_struct *si = p;
2571 int nid;
2572
2573 plist_for_each_entry_continue(si, &swap_active_head, list) {
2574 si->prio++;
2575 si->list.prio--;
2576 for_each_node(nid) {
2577 if (si->avail_lists[nid].prio != 1)
2578 si->avail_lists[nid].prio--;
2579 }
2580 }
2581 least_priority++;
2582 }
2583 plist_del(&p->list, &swap_active_head);
2584 atomic_long_sub(p->pages, &nr_swap_pages);
2585 total_swap_pages -= p->pages;
2586 p->flags &= ~SWP_WRITEOK;
2587 spin_unlock(&p->lock);
2588 spin_unlock(&swap_lock);
2589
2590 disable_swap_slots_cache_lock();
2591
2592 set_current_oom_origin();
2593 err = try_to_unuse(p->type, false, 0);
2594 clear_current_oom_origin();
2595
2596 if (err) {
		/* re-insert swap space back into swap_list */
2598 reinsert_swap_info(p);
2599 reenable_swap_slots_cache_unlock();
2600 goto out_dput;
2601 }
2602
2603 reenable_swap_slots_cache_unlock();
2604
	/*
	 * Wait for swap operations protected by get/put_swap_device()
	 * to complete: kill the percpu ref, wait an RCU grace period so
	 * that lockless get_swap_device() callers observe the kill, then
	 * wait for the remaining references to drain.
	 */
2612 percpu_ref_kill(&p->users);
2613 synchronize_rcu();
2614 wait_for_completion(&p->comp);
2615
2616 flush_work(&p->discard_work);
2617
2618 destroy_swap_extents(p);
2619 if (p->flags & SWP_CONTINUED)
2620 free_swap_count_continuations(p);
2621
2622 if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2623 atomic_dec(&nr_rotate_swap);
2624
2625 mutex_lock(&swapon_mutex);
2626 spin_lock(&swap_lock);
2627 spin_lock(&p->lock);
2628 drain_mmlist();
2629
	/* wait for anyone still in scan_swap_map_slots */
2631 p->highest_bit = 0;
2632 while (p->flags >= SWP_SCANNING) {
2633 spin_unlock(&p->lock);
2634 spin_unlock(&swap_lock);
2635 schedule_timeout_uninterruptible(1);
2636 spin_lock(&swap_lock);
2637 spin_lock(&p->lock);
2638 }
2639
2640 swap_file = p->swap_file;
2641 old_block_size = p->old_block_size;
2642 p->swap_file = NULL;
2643 p->max = 0;
2644 swap_map = p->swap_map;
2645 p->swap_map = NULL;
2646 cluster_info = p->cluster_info;
2647 p->cluster_info = NULL;
2648 frontswap_map = frontswap_map_get(p);
2649 spin_unlock(&p->lock);
2650 spin_unlock(&swap_lock);
2651 arch_swap_invalidate_area(p->type);
2652 frontswap_invalidate_area(p->type);
2653 frontswap_map_set(p, NULL);
2654 mutex_unlock(&swapon_mutex);
2655 free_percpu(p->percpu_cluster);
2656 p->percpu_cluster = NULL;
2657 free_percpu(p->cluster_next_cpu);
2658 p->cluster_next_cpu = NULL;
2659 vfree(swap_map);
2660 kvfree(cluster_info);
2661 kvfree(frontswap_map);
2662
2663 swap_cgroup_swapoff(p->type);
2664 exit_swap_address_space(p->type);
2665
2666 inode = mapping->host;
2667 if (S_ISBLK(inode->i_mode)) {
2668 struct block_device *bdev = I_BDEV(inode);
2669
2670 set_blocksize(bdev, old_block_size);
2671 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2672 }
2673
2674 inode_lock(inode);
2675 inode->i_flags &= ~S_SWAPFILE;
2676 inode_unlock(inode);
2677 filp_close(swap_file, NULL);
2678
	/*
	 * Clear the SWP_USED flag after all resources are freed so that swapon
	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
	 * not hold p->lock after we cleared its SWP_USED flag.
	 */
2684 spin_lock(&swap_lock);
2685 p->flags = 0;
2686 spin_unlock(&swap_lock);
2687
2688 err = 0;
2689 atomic_inc(&proc_poll_event);
2690 wake_up_interruptible(&proc_poll_wait);
2691
2692out_dput:
2693 filp_close(victim, NULL);
2694out:
2695 putname(pathname);
2696 return err;
2697}
2698
2699#ifdef CONFIG_PROC_FS
2700static __poll_t swaps_poll(struct file *file, poll_table *wait)
2701{
2702 struct seq_file *seq = file->private_data;
2703
2704 poll_wait(file, &proc_poll_wait, wait);
2705
2706 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2707 seq->poll_event = atomic_read(&proc_poll_event);
2708 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2709 }
2710
2711 return EPOLLIN | EPOLLRDNORM;
2712}
2713
/* iterator */
2715static void *swap_start(struct seq_file *swap, loff_t *pos)
2716{
2717 struct swap_info_struct *si;
2718 int type;
2719 loff_t l = *pos;
2720
2721 mutex_lock(&swapon_mutex);
2722
2723 if (!l)
2724 return SEQ_START_TOKEN;
2725
2726 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2727 if (!(si->flags & SWP_USED) || !si->swap_map)
2728 continue;
2729 if (!--l)
2730 return si;
2731 }
2732
2733 return NULL;
2734}
2735
2736static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2737{
2738 struct swap_info_struct *si = v;
2739 int type;
2740
2741 if (v == SEQ_START_TOKEN)
2742 type = 0;
2743 else
2744 type = si->type + 1;
2745
2746 ++(*pos);
2747 for (; (si = swap_type_to_swap_info(type)); type++) {
2748 if (!(si->flags & SWP_USED) || !si->swap_map)
2749 continue;
2750 return si;
2751 }
2752
2753 return NULL;
2754}
2755
2756static void swap_stop(struct seq_file *swap, void *v)
2757{
2758 mutex_unlock(&swapon_mutex);
2759}
2760
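/* Emit one /proc/swaps line, or the column header for SEQ_START_TOKEN. */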
2761static int swap_show(struct seq_file *swap, void *v)
2762{
2763 struct swap_info_struct *si = v;
2764 struct file *file;
2765 int len;
2766 unsigned int bytes, inuse;
2767
2768 if (si == SEQ_START_TOKEN) {
2769 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2770 return 0;
2771 }
2772
2773 bytes = si->pages << (PAGE_SHIFT - 10);
2774 inuse = si->inuse_pages << (PAGE_SHIFT - 10);
2775
2776 file = si->swap_file;
2777 len = seq_file_path(swap, file, " \t\n\\");
2778 seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n",
2779 len < 40 ? 40 - len : 1, " ",
2780 S_ISBLK(file_inode(file)->i_mode) ?
2781 "partition" : "file\t",
2782 bytes, bytes < 10000000 ? "\t" : "",
2783 inuse, inuse < 10000000 ? "\t" : "",
2784 si->prio);
2785 return 0;
2786}
2787
2788static const struct seq_operations swaps_op = {
2789 .start = swap_start,
2790 .next = swap_next,
2791 .stop = swap_stop,
2792 .show = swap_show
2793};
2794
2795static int swaps_open(struct inode *inode, struct file *file)
2796{
2797 struct seq_file *seq;
2798 int ret;
2799
2800 ret = seq_open(file, &swaps_op);
2801 if (ret)
2802 return ret;
2803
2804 seq = file->private_data;
2805 seq->poll_event = atomic_read(&proc_poll_event);
2806 return 0;
2807}
2808
2809static const struct proc_ops swaps_proc_ops = {
2810 .proc_flags = PROC_ENTRY_PERMANENT,
2811 .proc_open = swaps_open,
2812 .proc_read = seq_read,
2813 .proc_lseek = seq_lseek,
2814 .proc_release = seq_release,
2815 .proc_poll = swaps_poll,
2816};
2817
2818static int __init procswaps_init(void)
2819{
2820 proc_create("swaps", 0, NULL, &swaps_proc_ops);
2821 return 0;
2822}
2823__initcall(procswaps_init);
2824#endif
2825
2826#ifdef MAX_SWAPFILES_CHECK
2827static int __init max_swapfiles_check(void)
2828{
2829 MAX_SWAPFILES_CHECK();
2830 return 0;
2831}
2832late_initcall(max_swapfiles_check);
2833#endif
2834
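/*
 * Allocate a swap_info_struct and claim a free slot in swap_info[].  When a
 * slot left behind by an earlier swapoff can be reused, the freshly
 * allocated structure is discarded and the existing one recycled, so its
 * type number stays valid for concurrent readers such as /proc/swaps.
 */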
2835static struct swap_info_struct *alloc_swap_info(void)
2836{
2837 struct swap_info_struct *p;
2838 struct swap_info_struct *defer = NULL;
2839 unsigned int type;
2840 int i;
2841
2842 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2843 if (!p)
2844 return ERR_PTR(-ENOMEM);
2845
2846 if (percpu_ref_init(&p->users, swap_users_ref_free,
2847 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2848 kvfree(p);
2849 return ERR_PTR(-ENOMEM);
2850 }
2851
2852 spin_lock(&swap_lock);
2853 for (type = 0; type < nr_swapfiles; type++) {
2854 if (!(swap_info[type]->flags & SWP_USED))
2855 break;
2856 }
2857 if (type >= MAX_SWAPFILES) {
2858 spin_unlock(&swap_lock);
2859 percpu_ref_exit(&p->users);
2860 kvfree(p);
2861 return ERR_PTR(-EPERM);
2862 }
2863 if (type >= nr_swapfiles) {
2864 p->type = type;
		/*
		 * Publish the swap_info_struct with a release store so that
		 * lockless readers in swap_type_to_swap_info() see the
		 * zero-initialised entry once the pointer becomes visible.
		 */
2869 smp_store_release(&swap_info[type], p);
2870 nr_swapfiles++;
2871 } else {
2872 defer = p;
2873 p = swap_info[type];
		/*
		 * Do not memset this entry: a racing procfs swap_start()
		 * or swap_next() would rely on its contents (e.g. p->type)
		 * remaining valid.
		 */
2878 }
2879 p->swap_extent_root = RB_ROOT;
2880 plist_node_init(&p->list, 0);
2881 for_each_node(i)
2882 plist_node_init(&p->avail_lists[i], 0);
2883 p->flags = SWP_USED;
2884 spin_unlock(&swap_lock);
2885 if (defer) {
2886 percpu_ref_exit(&defer->users);
2887 kvfree(defer);
2888 }
2889 spin_lock_init(&p->lock);
2890 spin_lock_init(&p->cont_lock);
2891 init_completion(&p->comp);
2892
2893 return p;
2894}
2895
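/*
 * Take ownership of the file or device passed to swapon: block devices are
 * opened exclusively and switched to PAGE_SIZE blocks, while regular files
 * just record their filesystem's backing device (if any).
 */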
2896static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2897{
2898 int error;
2899
2900 if (S_ISBLK(inode->i_mode)) {
2901 p->bdev = blkdev_get_by_dev(inode->i_rdev,
2902 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2903 if (IS_ERR(p->bdev)) {
2904 error = PTR_ERR(p->bdev);
2905 p->bdev = NULL;
2906 return error;
2907 }
2908 p->old_block_size = block_size(p->bdev);
2909 error = set_blocksize(p->bdev, PAGE_SIZE);
2910 if (error < 0)
2911 return error;
		/*
		 * Zoned block devices contain zones that have a sequential
		 * write only restriction.  Hence zoned block devices are not
		 * suitable for swapping.  Disallow them here.
		 */
2917 if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
2918 return -EINVAL;
2919 p->flags |= SWP_BLKDEV;
2920 } else if (S_ISREG(inode->i_mode)) {
2921 p->bdev = inode->i_sb->s_bdev;
2922 }
2923
2924 return 0;
2925}
2926
2927
/*
 * Find out how many pages are allowed for a single swap device. There
 * are two limiting factors:
 * 1) the number of bits for the swap offset in the swp_entry_t type, and
 * 2) the number of bits in the swap pte, as defined by the different
 * architectures.
 *
 * In order to find the largest possible bit mask, a swap entry with
 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
 * decoded to a swp_entry_t again, and finally the swap offset is
 * extracted.
 *
 * This will mask all the bits from the initial ~0UL mask that can't be
 * encoded in either the swp_entry_t or the architecture definition of a
 * swap pte.
 */
2944unsigned long generic_max_swapfile_size(void)
2945{
2946 return swp_offset(pte_to_swp_entry(
2947 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2948}
2949
/* Can be overridden by an architecture for additional checks. */
2951__weak unsigned long max_swapfile_size(void)
2952{
2953 return generic_max_swapfile_size();
2954}
2955
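/*
 * Validate the swap header (signature, version, size and bad-page list) and
 * return the number of usable pages, or 0 if anything is inconsistent.
 */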
2956static unsigned long read_swap_header(struct swap_info_struct *p,
2957 union swap_header *swap_header,
2958 struct inode *inode)
2959{
2960 int i;
2961 unsigned long maxpages;
2962 unsigned long swapfilepages;
2963 unsigned long last_page;
2964
2965 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2966 pr_err("Unable to find swap-space signature\n");
2967 return 0;
2968 }
2969
	/* swap partition endianness hack... */
2971 if (swab32(swap_header->info.version) == 1) {
2972 swab32s(&swap_header->info.version);
2973 swab32s(&swap_header->info.last_page);
2974 swab32s(&swap_header->info.nr_badpages);
2975 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2976 return 0;
2977 for (i = 0; i < swap_header->info.nr_badpages; i++)
2978 swab32s(&swap_header->info.badpages[i]);
2979 }
2980
2981 if (swap_header->info.version != 1) {
2982 pr_warn("Unable to handle swap header version %d\n",
2983 swap_header->info.version);
2984 return 0;
2985 }
2986
2987 p->lowest_bit = 1;
2988 p->cluster_next = 1;
2989 p->cluster_nr = 0;
2990
2991 maxpages = max_swapfile_size();
2992 last_page = swap_header->info.last_page;
2993 if (!last_page) {
2994 pr_warn("Empty swap-file\n");
2995 return 0;
2996 }
2997 if (last_page > maxpages) {
2998 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2999 maxpages << (PAGE_SHIFT - 10),
3000 last_page << (PAGE_SHIFT - 10));
3001 }
3002 if (maxpages > last_page) {
3003 maxpages = last_page + 1;
		/* p->max is an unsigned int: don't overflow it */
3005 if ((unsigned int)maxpages == 0)
3006 maxpages = UINT_MAX;
3007 }
3008 p->highest_bit = maxpages - 1;
3009
3010 if (!maxpages)
3011 return 0;
3012 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3013 if (swapfilepages && maxpages > swapfilepages) {
3014 pr_warn("Swap area shorter than signature indicates\n");
3015 return 0;
3016 }
3017 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3018 return 0;
3019 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3020 return 0;
3021
3022 return maxpages;
3023}
3024
3025#define SWAP_CLUSTER_INFO_COLS \
3026 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3027#define SWAP_CLUSTER_SPACE_COLS \
3028 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3029#define SWAP_CLUSTER_COLS \
3030 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3031
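/*
 * Build the swap_map (marking the header page and any listed bad pages as
 * SWAP_MAP_BAD), set up the on-disk extents, and, for SSDs, thread the free
 * clusters onto the free list.
 */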
3032static int setup_swap_map_and_extents(struct swap_info_struct *p,
3033 union swap_header *swap_header,
3034 unsigned char *swap_map,
3035 struct swap_cluster_info *cluster_info,
3036 unsigned long maxpages,
3037 sector_t *span)
3038{
3039 unsigned int j, k;
3040 unsigned int nr_good_pages;
3041 int nr_extents;
3042 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3043 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3044 unsigned long i, idx;
3045
3046 nr_good_pages = maxpages - 1;
3047
3048 cluster_list_init(&p->free_clusters);
3049 cluster_list_init(&p->discard_clusters);
3050
3051 for (i = 0; i < swap_header->info.nr_badpages; i++) {
3052 unsigned int page_nr = swap_header->info.badpages[i];
3053 if (page_nr == 0 || page_nr > swap_header->info.last_page)
3054 return -EINVAL;
3055 if (page_nr < maxpages) {
3056 swap_map[page_nr] = SWAP_MAP_BAD;
3057 nr_good_pages--;
			/*
			 * Haven't marked the cluster free yet, no list
			 * operation involved
			 */
3062 inc_cluster_info_page(p, cluster_info, page_nr);
3063 }
3064 }
3065
	/* Haven't marked the cluster free yet, no list operation involved */
3067 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3068 inc_cluster_info_page(p, cluster_info, i);
3069
3070 if (nr_good_pages) {
3071 swap_map[0] = SWAP_MAP_BAD;
		/*
		 * Don't mark the cluster free yet, no list
		 * operation involved
		 */
3076 inc_cluster_info_page(p, cluster_info, 0);
3077 p->max = maxpages;
3078 p->pages = nr_good_pages;
3079 nr_extents = setup_swap_extents(p, span);
3080 if (nr_extents < 0)
3081 return nr_extents;
3082 nr_good_pages = p->pages;
3083 }
3084 if (!nr_good_pages) {
3085 pr_warn("Empty swap-file\n");
3086 return -EINVAL;
3087 }
3088
3089 if (!cluster_info)
3090 return nr_extents;
3091
	/*
	 * Link the free clusters column by column, rather than in index
	 * order, to reduce false cacheline sharing between cluster_info
	 * entries and between clusters of the same swap address space.
	 */
3097 for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3098 j = (k + col) % SWAP_CLUSTER_COLS;
3099 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3100 idx = i * SWAP_CLUSTER_COLS + j;
3101 if (idx >= nr_clusters)
3102 continue;
3103 if (cluster_count(&cluster_info[idx]))
3104 continue;
3105 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3106 cluster_list_add_tail(&p->free_clusters, cluster_info,
3107 idx);
3108 }
3109 }
3110 return nr_extents;
3111}
3112
/*
 * Does the backing block device of this swap area support discard
 * (TRIM) requests?
 */
3117static bool swap_discardable(struct swap_info_struct *si)
3118{
3119 struct request_queue *q = bdev_get_queue(si->bdev);
3120
3121 if (!q || !blk_queue_discard(q))
3122 return false;
3123
3124 return true;
3125}
3126
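/*
 * swapon: open and claim the swap file or partition, parse its header,
 * allocate the swap map plus cluster and frontswap metadata, then publish
 * the device through enable_swap_info() at the requested (or next automatic)
 * priority.
 */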
3127SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3128{
3129 struct swap_info_struct *p;
3130 struct filename *name;
3131 struct file *swap_file = NULL;
3132 struct address_space *mapping;
3133 int prio;
3134 int error;
3135 union swap_header *swap_header;
3136 int nr_extents;
3137 sector_t span;
3138 unsigned long maxpages;
3139 unsigned char *swap_map = NULL;
3140 struct swap_cluster_info *cluster_info = NULL;
3141 unsigned long *frontswap_map = NULL;
3142 struct page *page = NULL;
3143 struct inode *inode = NULL;
3144 bool inced_nr_rotate_swap = false;
3145
3146 if (swap_flags & ~SWAP_FLAGS_VALID)
3147 return -EINVAL;
3148
3149 if (!capable(CAP_SYS_ADMIN))
3150 return -EPERM;
3151
3152 if (!swap_avail_heads)
3153 return -ENOMEM;
3154
3155 p = alloc_swap_info();
3156 if (IS_ERR(p))
3157 return PTR_ERR(p);
3158
3159 INIT_WORK(&p->discard_work, swap_discard_work);
3160
3161 name = getname(specialfile);
3162 if (IS_ERR(name)) {
3163 error = PTR_ERR(name);
3164 name = NULL;
3165 goto bad_swap;
3166 }
3167 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3168 if (IS_ERR(swap_file)) {
3169 error = PTR_ERR(swap_file);
3170 swap_file = NULL;
3171 goto bad_swap;
3172 }
3173
3174 p->swap_file = swap_file;
3175 mapping = swap_file->f_mapping;
3176 inode = mapping->host;
3177
3178 error = claim_swapfile(p, inode);
3179 if (unlikely(error))
3180 goto bad_swap;
3181
3182 inode_lock(inode);
3183 if (IS_SWAPFILE(inode)) {
3184 error = -EBUSY;
3185 goto bad_swap_unlock_inode;
3186 }
3187
	/*
	 * Read the swap header.
	 */
3191 if (!mapping->a_ops->readpage) {
3192 error = -EINVAL;
3193 goto bad_swap_unlock_inode;
3194 }
3195 page = read_mapping_page(mapping, 0, swap_file);
3196 if (IS_ERR(page)) {
3197 error = PTR_ERR(page);
3198 goto bad_swap_unlock_inode;
3199 }
3200 swap_header = kmap(page);
3201
3202 maxpages = read_swap_header(p, swap_header, inode);
3203 if (unlikely(!maxpages)) {
3204 error = -EINVAL;
3205 goto bad_swap_unlock_inode;
3206 }
3207
	/* OK, set up the swap map and apply the bad block list */
3209 swap_map = vzalloc(maxpages);
3210 if (!swap_map) {
3211 error = -ENOMEM;
3212 goto bad_swap_unlock_inode;
3213 }
3214
3215 if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
3216 p->flags |= SWP_STABLE_WRITES;
3217
3218 if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3219 p->flags |= SWP_SYNCHRONOUS_IO;
3220
3221 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3222 int cpu;
3223 unsigned long ci, nr_cluster;
3224
3225 p->flags |= SWP_SOLIDSTATE;
3226 p->cluster_next_cpu = alloc_percpu(unsigned int);
3227 if (!p->cluster_next_cpu) {
3228 error = -ENOMEM;
3229 goto bad_swap_unlock_inode;
3230 }
		/*
		 * select a random position to start with to help wear leveling
		 * SSD
		 */
3235 for_each_possible_cpu(cpu) {
3236 per_cpu(*p->cluster_next_cpu, cpu) =
3237 1 + prandom_u32_max(p->highest_bit);
3238 }
3239 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3240
3241 cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3242 GFP_KERNEL);
3243 if (!cluster_info) {
3244 error = -ENOMEM;
3245 goto bad_swap_unlock_inode;
3246 }
3247
3248 for (ci = 0; ci < nr_cluster; ci++)
3249 spin_lock_init(&((cluster_info + ci)->lock));
3250
3251 p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3252 if (!p->percpu_cluster) {
3253 error = -ENOMEM;
3254 goto bad_swap_unlock_inode;
3255 }
3256 for_each_possible_cpu(cpu) {
3257 struct percpu_cluster *cluster;
3258 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3259 cluster_set_null(&cluster->index);
3260 }
3261 } else {
3262 atomic_inc(&nr_rotate_swap);
3263 inced_nr_rotate_swap = true;
3264 }
3265
3266 error = swap_cgroup_swapon(p->type, maxpages);
3267 if (error)
3268 goto bad_swap_unlock_inode;
3269
3270 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3271 cluster_info, maxpages, &span);
3272 if (unlikely(nr_extents < 0)) {
3273 error = nr_extents;
3274 goto bad_swap_unlock_inode;
3275 }
3276
3277 if (IS_ENABLED(CONFIG_FRONTSWAP))
3278 frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3279 sizeof(long),
3280 GFP_KERNEL);
3281
3282 if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
		/*
		 * When discard is enabled for swap with no particular
		 * policy flagged, we set all swap discard flags here in
		 * order to sustain backward compatibility with older
		 * swapon(8) releases.
		 */
3289 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3290 SWP_PAGE_DISCARD);
3291
		/*
		 * By flagging sys_swapon, a sysadmin can tell us to
		 * either do single-time area discards only, or to just
		 * perform discards for released swap page-clusters.
		 * Now it's time to adjust the p->flags accordingly.
		 */
3298 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3299 p->flags &= ~SWP_PAGE_DISCARD;
3300 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3301 p->flags &= ~SWP_AREA_DISCARD;
3302
		/* issue a swapon-time discard if it's still required */
3304 if (p->flags & SWP_AREA_DISCARD) {
3305 int err = discard_swap(p);
3306 if (unlikely(err))
3307 pr_err("swapon: discard_swap(%p): %d\n",
3308 p, err);
3309 }
3310 }
3311
3312 error = init_swap_address_space(p->type, maxpages);
3313 if (error)
3314 goto bad_swap_unlock_inode;
3315
	/*
	 * Flush any pending IO and dirty mappings before we start using this
	 * swap device.
	 */
3320 inode->i_flags |= S_SWAPFILE;
3321 error = inode_drain_writes(inode);
3322 if (error) {
3323 inode->i_flags &= ~S_SWAPFILE;
3324 goto free_swap_address_space;
3325 }
3326
3327 mutex_lock(&swapon_mutex);
3328 prio = -1;
3329 if (swap_flags & SWAP_FLAG_PREFER)
3330 prio =
3331 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3332 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3333
3334 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3335 p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3336 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3337 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3338 (p->flags & SWP_DISCARDABLE) ? "D" : "",
3339 (p->flags & SWP_AREA_DISCARD) ? "s" : "",
3340 (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3341 (frontswap_map) ? "FS" : "");
3342
3343 mutex_unlock(&swapon_mutex);
3344 atomic_inc(&proc_poll_event);
3345 wake_up_interruptible(&proc_poll_wait);
3346
3347 error = 0;
3348 goto out;
3349free_swap_address_space:
3350 exit_swap_address_space(p->type);
3351bad_swap_unlock_inode:
3352 inode_unlock(inode);
3353bad_swap:
3354 free_percpu(p->percpu_cluster);
3355 p->percpu_cluster = NULL;
3356 free_percpu(p->cluster_next_cpu);
3357 p->cluster_next_cpu = NULL;
3358 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3359 set_blocksize(p->bdev, p->old_block_size);
3360 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3361 }
3362 inode = NULL;
3363 destroy_swap_extents(p);
3364 swap_cgroup_swapoff(p->type);
3365 spin_lock(&swap_lock);
3366 p->swap_file = NULL;
3367 p->flags = 0;
3368 spin_unlock(&swap_lock);
3369 vfree(swap_map);
3370 kvfree(cluster_info);
3371 kvfree(frontswap_map);
3372 if (inced_nr_rotate_swap)
3373 atomic_dec(&nr_rotate_swap);
3374 if (swap_file)
3375 filp_close(swap_file, NULL);
3376out:
3377 if (page && !IS_ERR(page)) {
3378 kunmap(page);
3379 put_page(page);
3380 }
3381 if (name)
3382 putname(name);
3383 if (inode)
3384 inode_unlock(inode);
3385 if (!error)
3386 enable_swap_slots_cache();
3387 return error;
3388}
3389
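/*
 * Fill in the swap fields of a sysinfo structure.  Pages on devices that are
 * currently being swapped off are added back so they still show up in the
 * free and total figures.
 */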
3390void si_swapinfo(struct sysinfo *val)
3391{
3392 unsigned int type;
3393 unsigned long nr_to_be_unused = 0;
3394
3395 spin_lock(&swap_lock);
3396 for (type = 0; type < nr_swapfiles; type++) {
3397 struct swap_info_struct *si = swap_info[type];
3398
3399 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3400 nr_to_be_unused += si->inuse_pages;
3401 }
3402 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3403 val->totalswap = total_swap_pages + nr_to_be_unused;
3404 spin_unlock(&swap_lock);
3405}
3406
/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Returns error code in following case.
 * - success -> 0
 * - swp_entry is invalid -> EINVAL
 * - swp_entry is migration entry -> EINVAL
 * - swap-cache reference is requested but there is already one. -> EEXIST
 * - swap-cache reference is requested but the entry is not used. -> ENOENT
 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
 */
3418static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3419{
3420 struct swap_info_struct *p;
3421 struct swap_cluster_info *ci;
3422 unsigned long offset;
3423 unsigned char count;
3424 unsigned char has_cache;
3425 int err;
3426
3427 p = get_swap_device(entry);
3428 if (!p)
3429 return -EINVAL;
3430
3431 offset = swp_offset(entry);
3432 ci = lock_cluster_or_swap_info(p, offset);
3433
3434 count = p->swap_map[offset];
3435
	/*
	 * swapin_readahead() doesn't check if a swap entry is valid, so the
	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
	 */
3440 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3441 err = -ENOENT;
3442 goto unlock_out;
3443 }
3444
3445 has_cache = count & SWAP_HAS_CACHE;
3446 count &= ~SWAP_HAS_CACHE;
3447 err = 0;
3448
3449 if (usage == SWAP_HAS_CACHE) {
3450
		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3452 if (!has_cache && count)
3453 has_cache = SWAP_HAS_CACHE;
3454 else if (has_cache)
3455 err = -EEXIST;
3456 else
3457 err = -ENOENT;
3458
3459 } else if (count || has_cache) {
3460
3461 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3462 count += usage;
3463 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3464 err = -EINVAL;
3465 else if (swap_count_continued(p, offset, count))
3466 count = COUNT_CONTINUED;
3467 else
3468 err = -ENOMEM;
3469 } else
3470 err = -ENOENT;
3471
3472 WRITE_ONCE(p->swap_map[offset], count | has_cache);
3473
3474unlock_out:
3475 unlock_cluster_or_swap_info(p, ci);
3476 if (p)
3477 put_swap_device(p);
3478 return err;
3479}
3480
/*
 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
 * (in which case its reference count is never incremented).
 */
3485void swap_shmem_alloc(swp_entry_t entry)
3486{
3487 __swap_duplicate(entry, SWAP_MAP_SHMEM);
3488}
3489
/*
 * Increase reference count of swap entry by 1.
 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
 * might occur if a page table entry has got corrupted.
 */
3497int swap_duplicate(swp_entry_t entry)
3498{
3499 int err = 0;
3500
3501 while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3502 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3503 return err;
3504}
3505
/*
 * @entry: swap entry for which we allocate swap cache.
 *
 * Called when allocating swap cache for an existing swap entry.
 * This can return error codes. Returns 0 on success.
 * -EEXIST means there is already a swap cache.
 * Note: return code is different from swap_duplicate().
 */
3514int swapcache_prepare(swp_entry_t entry)
3515{
3516 return __swap_duplicate(entry, SWAP_HAS_CACHE);
3517}
3518
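/* Map a swap entry, or a page's swap entry, back to its swap_info_struct. */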
3519struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3520{
3521 return swap_type_to_swap_info(swp_type(entry));
3522}
3523
3524struct swap_info_struct *page_swap_info(struct page *page)
3525{
3526 swp_entry_t entry = { .val = page_private(page) };
3527 return swp_swap_info(entry);
3528}
3529
/*
 * out-of-line methods to avoid include hell.
 */
3533struct address_space *__page_file_mapping(struct page *page)
3534{
3535 return page_swap_info(page)->swap_file->f_mapping;
3536}
3537EXPORT_SYMBOL_GPL(__page_file_mapping);
3538
3539pgoff_t __page_file_index(struct page *page)
3540{
3541 swp_entry_t swap = { .val = page_private(page) };
3542 return swp_offset(swap);
3543}
3544EXPORT_SYMBOL_GPL(__page_file_index);
3545
/*
 * add_swap_count_continuation - called when a swap count is duplicated
 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
 * page of the original vmalloc'ed swap_map, to hold the continuation count
 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
 *
 * These continuation pages are seldom referenced: the common paths all work
 * on the original swap_map, only referring to a continuation page when the
 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
 *
 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
 * page table locks, so the page is allocated up front and -ENOMEM is
 * returned (for the caller to retry) if that allocation fails.
 */
3561int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3562{
3563 struct swap_info_struct *si;
3564 struct swap_cluster_info *ci;
3565 struct page *head;
3566 struct page *page;
3567 struct page *list_page;
3568 pgoff_t offset;
3569 unsigned char count;
3570 int ret = 0;
3571
	/*
	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
	 */
3576 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3577
3578 si = get_swap_device(entry);
3579 if (!si) {
		/*
		 * An acceptable race has occurred since the failing
		 * __swap_duplicate(): the swap device may be swapoff
		 */
3584 goto outer;
3585 }
3586 spin_lock(&si->lock);
3587
3588 offset = swp_offset(entry);
3589
3590 ci = lock_cluster(si, offset);
3591
3592 count = swap_count(si->swap_map[offset]);
3593
3594 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
		/*
		 * The higher the swap count, the more likely it is that tasks
		 * will race to add swap count continuation: we need to avoid
		 * over-provisioning.
		 */
3600 goto out;
3601 }
3602
3603 if (!page) {
3604 ret = -ENOMEM;
3605 goto out;
3606 }
3607
	/*
	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
	 * no architecture is using highmem pages for kernel page tables: so it
	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
	 */
3613 head = vmalloc_to_page(si->swap_map + offset);
3614 offset &= ~PAGE_MASK;
3615
3616 spin_lock(&si->cont_lock);
	/*
	 * Page allocation does not initialize the page's lru field,
	 * but it does always reset its private field; so a zero
	 * page_private(head) means the continuation list still needs
	 * initialising.
	 */
3621 if (!page_private(head)) {
3622 BUG_ON(count & COUNT_CONTINUED);
3623 INIT_LIST_HEAD(&head->lru);
3624 set_page_private(head, SWP_CONTINUED);
3625 si->flags |= SWP_CONTINUED;
3626 }
3627
3628 list_for_each_entry(list_page, &head->lru, lru) {
3629 unsigned char *map;
3630
		/*
		 * If the previous map said no continuation, but we've found
		 * a continuation page, free our allocation and use this one.
		 */
3635 if (!(count & COUNT_CONTINUED))
3636 goto out_unlock_cont;
3637
3638 map = kmap_atomic(list_page) + offset;
3639 count = *map;
3640 kunmap_atomic(map);
3641
		/*
		 * If this continuation count now has some space in it,
		 * free our allocation and use this one.
		 */
3646 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3647 goto out_unlock_cont;
3648 }
3649
3650 list_add_tail(&page->lru, &head->lru);
3651 page = NULL;
3652out_unlock_cont:
3653 spin_unlock(&si->cont_lock);
3654out:
3655 unlock_cluster(ci);
3656 spin_unlock(&si->lock);
3657 put_swap_device(si);
3658outer:
3659 if (page)
3660 __free_page(page);
3661 return ret;
3662}
3663
/*
 * swap_count_continued - when the original swap_map count is incremented
 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
 * into, carry if so, or else fail until a new continuation page is allocated;
 * when the original swap_map count is decremented from 0 with continuation,
 * borrow from the continuation and report whether it still holds more.
 *
 * Called while __swap_duplicate() or swap_entry_free() holds the swap or
 * cluster lock.
 */
3673static bool swap_count_continued(struct swap_info_struct *si,
3674 pgoff_t offset, unsigned char count)
3675{
3676 struct page *head;
3677 struct page *page;
3678 unsigned char *map;
3679 bool ret;
3680
3681 head = vmalloc_to_page(si->swap_map + offset);
3682 if (page_private(head) != SWP_CONTINUED) {
3683 BUG_ON(count & COUNT_CONTINUED);
3684 return false;
3685 }
3686
3687 spin_lock(&si->cont_lock);
3688 offset &= ~PAGE_MASK;
3689 page = list_next_entry(head, lru);
3690 map = kmap_atomic(page) + offset;
3691
3692 if (count == SWAP_MAP_MAX)
3693 goto init_map;
3694
3695 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) {
		/*
		 * Think of how you add 1 to 999
		 */
3699 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3700 kunmap_atomic(map);
3701 page = list_next_entry(page, lru);
3702 BUG_ON(page == head);
3703 map = kmap_atomic(page) + offset;
3704 }
3705 if (*map == SWAP_CONT_MAX) {
3706 kunmap_atomic(map);
3707 page = list_next_entry(page, lru);
3708 if (page == head) {
3709 ret = false;
3710 goto out;
3711 }
3712 map = kmap_atomic(page) + offset;
3713init_map: *map = 0;
3714 }
3715 *map += 1;
3716 kunmap_atomic(map);
3717 while ((page = list_prev_entry(page, lru)) != head) {
3718 map = kmap_atomic(page) + offset;
3719 *map = COUNT_CONTINUED;
3720 kunmap_atomic(map);
3721 }
3722 ret = true;
3723
3724 } else {
		/*
		 * Think of how you subtract 1 from 1000
		 */
3728 BUG_ON(count != COUNT_CONTINUED);
3729 while (*map == COUNT_CONTINUED) {
3730 kunmap_atomic(map);
3731 page = list_next_entry(page, lru);
3732 BUG_ON(page == head);
3733 map = kmap_atomic(page) + offset;
3734 }
3735 BUG_ON(*map == 0);
3736 *map -= 1;
3737 if (*map == 0)
3738 count = 0;
3739 kunmap_atomic(map);
3740 while ((page = list_prev_entry(page, lru)) != head) {
3741 map = kmap_atomic(page) + offset;
3742 *map = SWAP_CONT_MAX | count;
3743 count = COUNT_CONTINUED;
3744 kunmap_atomic(map);
3745 }
3746 ret = count == COUNT_CONTINUED;
3747 }
3748out:
3749 spin_unlock(&si->cont_lock);
3750 return ret;
3751}
3752
/*
 * free_swap_count_continuations - swapoff free all the continuation pages
 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
 */
3757static void free_swap_count_continuations(struct swap_info_struct *si)
3758{
3759 pgoff_t offset;
3760
3761 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3762 struct page *head;
3763 head = vmalloc_to_page(si->swap_map + offset);
3764 if (page_private(head)) {
3765 struct page *page, *next;
3766
3767 list_for_each_entry_safe(page, next, &head->lru, lru) {
3768 list_del(&page->lru);
3769 __free_page(page);
3770 }
3771 }
3772 }
3773}
3774
3775#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3776void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3777{
3778 struct swap_info_struct *si, *next;
3779 int nid = page_to_nid(page);
3780
3781 if (!(gfp_mask & __GFP_IO))
3782 return;
3783
3784 if (!blk_cgroup_congested())
3785 return;
3786
	/*
	 * We've already scheduled a throttle, avoid taking the global swap
	 * lock.
	 */
3791 if (current->throttle_queue)
3792 return;
3793
3794 spin_lock(&swap_avail_lock);
3795 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3796 avail_lists[nid]) {
3797 if (si->bdev) {
3798 blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
3799 break;
3800 }
3801 }
3802 spin_unlock(&swap_avail_lock);
3803}
3804#endif
3805
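/*
 * Early init: allocate one availability plist head per NUMA node so swap
 * devices can later be enabled and ordered per node.
 */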
3806static int __init swapfile_init(void)
3807{
3808 int nid;
3809
3810 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3811 GFP_KERNEL);
3812 if (!swap_avail_heads) {
3813 pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3814 return -ENOMEM;
3815 }
3816
3817 for_each_node(nid)
3818 plist_head_init(&swap_avail_heads[nid]);
3819
3820 return 0;
3821}
3822subsys_initcall(swapfile_init);
3823